/*	$OpenBSD: aic7xxx_openbsd.c,v 1.51 2011/07/17 22:46:48 matthew Exp $	*/
/*	$NetBSD: aic7xxx_osm.c,v 1.14 2003/11/02 11:07:44 wiz Exp $	*/

/*
 * Bus independent OpenBSD shim for the aic7xxx based adaptec SCSI controllers
 *
 * Copyright (c) 1994-2001 Justin T. Gibbs.
 * Copyright (c) 2001-2002 Steve Murphree, Jr.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions, and the following disclaimer,
 *    without modification.
 * 2. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * Alternatively, this software may be distributed under the terms of the
 * GNU Public License ("GPL").
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * //depot/aic7xxx/freebsd/dev/aic7xxx/aic7xxx_osm.c#12 $
 *
 * $FreeBSD: src/sys/dev/aic7xxx/aic7xxx_osm.c,v 1.31 2002/11/30 19:08:58 scottl Exp $
 */
/*
 * Ported from FreeBSD by Pascal Renauld, Network Storage Solutions, Inc. - April 2003
 */

#include <dev/ic/aic7xxx_openbsd.h>
#include <dev/ic/aic7xxx_inline.h>

#ifndef AHC_TMODE_ENABLE
#define AHC_TMODE_ENABLE 0
#endif


void	ahc_action(struct scsi_xfer *);
void	ahc_execute_scb(void *, bus_dma_segment_t *, int);
int	ahc_poll(struct ahc_softc *, int);
void	ahc_setup_data(struct ahc_softc *, struct scsi_xfer *, struct scb *);

void	ahc_minphys(struct buf *, struct scsi_link *);
void	ahc_adapter_req_set_xfer_mode(struct ahc_softc *, struct scb *);


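/*
 * Autoconf and SCSI midlayer glue: ahc_cd names the driver for
 * autoconf, and ahc_switch provides the adapter entry points
 * (command submission via ahc_action and transfer-size limiting
 * via ahc_minphys) used by the SCSI midlayer.
 */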
struct cfdriver ahc_cd = {
	NULL, "ahc", DV_DULL
};

static struct scsi_adapter ahc_switch =
{
	ahc_action,
	ahc_minphys,
	0,
	0,
};

/*
 * Attach all the sub-devices we can find
 */
int
ahc_attach(struct ahc_softc *ahc)
{
	struct scsibus_attach_args saa;
	int s;

	s = splbio();

	/*
	 * fill in the prototype scsi_links.
	 */
	ahc->sc_channel.adapter_target = ahc->our_id;
	if (ahc->features & AHC_WIDE)
		ahc->sc_channel.adapter_buswidth = 16;
	ahc->sc_channel.adapter_softc = ahc;
	ahc->sc_channel.adapter = &ahc_switch;
	ahc->sc_channel.openings = 16;

	if (ahc->features & AHC_TWIN) {
		/* Configure the second scsi bus */
		ahc->sc_channel_b = ahc->sc_channel;
		ahc->sc_channel_b.adapter_target = ahc->our_id_b;
	}

#ifndef DEBUG
	if (bootverbose) {
		char ahc_info[256];
		ahc_controller_info(ahc, ahc_info, sizeof ahc_info);
		printf("%s: %s\n", ahc->sc_dev.dv_xname, ahc_info);
	}
#endif

	ahc_intr_enable(ahc, TRUE);

	if (ahc->flags & AHC_RESET_BUS_A)
		ahc_reset_channel(ahc, 'A', TRUE);
	if ((ahc->features & AHC_TWIN) && ahc->flags & AHC_RESET_BUS_B)
		ahc_reset_channel(ahc, 'B', TRUE);

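	/*
	 * Attach the SCSI bus(es).  When channel B is the primary
	 * channel, it is attached first so that it is probed ahead
	 * of channel A.
	 */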
	bzero(&saa, sizeof(saa));
	if ((ahc->flags & AHC_PRIMARY_CHANNEL) == 0) {
		saa.saa_sc_link = &ahc->sc_channel;
		ahc->sc_child = config_found((void *)&ahc->sc_dev,
		    &saa, scsiprint);
		if (ahc->features & AHC_TWIN) {
			saa.saa_sc_link = &ahc->sc_channel_b;
			ahc->sc_child_b = config_found((void *)&ahc->sc_dev,
			    &saa, scsiprint);
		}
	} else {
		if (ahc->features & AHC_TWIN) {
			saa.saa_sc_link = &ahc->sc_channel_b;
			ahc->sc_child = config_found((void *)&ahc->sc_dev,
			    &saa, scsiprint);
		}
		saa.saa_sc_link = &ahc->sc_channel;
		ahc->sc_child_b = config_found((void *)&ahc->sc_dev,
		    &saa, scsiprint);
	}

	splx(s);
	return (1);
}

/*
 * Catch an interrupt from the adapter
 */
int
ahc_platform_intr(void *arg)
{
	struct	ahc_softc *ahc = (struct ahc_softc *)arg;

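	/*
	 * Make hardware SCBs completed by the controller visible to
	 * the CPU before the core interrupt handler looks at them.
	 */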
	bus_dmamap_sync(ahc->parent_dmat, ahc->scb_data->hscb_dmamap,
	    0, ahc->scb_data->hscb_dmamap->dm_mapsize,
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

	return ahc_intr(ahc);
}

/*
 * We have an scb which has been processed by the
 * adaptor, now we look to see how the operation
 * went.
 */
void
ahc_done(struct ahc_softc *ahc, struct scb *scb)
{
	struct scsi_xfer *xs = scb->xs;
	int s;

	bus_dmamap_sync(ahc->parent_dmat, ahc->scb_data->hscb_dmamap,
	    0, ahc->scb_data->hscb_dmamap->dm_mapsize,
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

	LIST_REMOVE(scb, pending_links);
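	/*
	 * If this was an untagged command, take it off the per-target
	 * untagged queue and start the next queued untagged request.
	 */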
	if ((scb->flags & SCB_UNTAGGEDQ) != 0) {
		struct scb_tailq *untagged_q;
		int target_offset;

		target_offset = SCB_GET_TARGET_OFFSET(ahc, scb);
		untagged_q = &ahc->untagged_queues[target_offset];
		TAILQ_REMOVE(untagged_q, scb, links.tqe);
		scb->flags &= ~SCB_UNTAGGEDQ;
		ahc_run_untagged_queue(ahc, untagged_q);
	}

	timeout_del(&xs->stimeout);

	if (xs->datalen) {
		int op;

		if ((xs->flags & SCSI_DATA_IN) != 0)
			op = BUS_DMASYNC_POSTREAD;
		else
			op = BUS_DMASYNC_POSTWRITE;
		bus_dmamap_sync(ahc->parent_dmat, scb->dmamap, 0,
				scb->dmamap->dm_mapsize, op);
		bus_dmamap_unload(ahc->parent_dmat, scb->dmamap);
	}

	/* Translate the CAM status code to a SCSI error code. */
	switch (xs->error) {
	case CAM_SCSI_STATUS_ERROR:
	case CAM_REQ_INPROG:
	case CAM_REQ_CMP:
		switch (xs->status) {
		case SCSI_TASKSET_FULL:
			xs->error = XS_NO_CCB;
			break;
		case SCSI_BUSY:
			xs->error = XS_BUSY;
			break;
		case SCSI_CHECK:
		case SCSI_TERMINATED:
			if ((scb->flags & SCB_SENSE) == 0) {
				/* CHECK on CHECK? */
				xs->error = XS_DRIVER_STUFFUP;
			} else
				xs->error = XS_NOERROR;
			break;
		default:
			xs->error = XS_NOERROR;
			break;
		}
		break;
	case CAM_BUSY:
		xs->error = XS_BUSY;
		break;
	case CAM_CMD_TIMEOUT:
		xs->error = XS_TIMEOUT;
		break;
	case CAM_BDR_SENT:
	case CAM_SCSI_BUS_RESET:
		xs->error = XS_RESET;
		break;
	case CAM_REQUEUE_REQ:
		xs->error = XS_NO_CCB;
		break;
	case CAM_SEL_TIMEOUT:
		xs->error = XS_SELTIMEOUT;
		break;
	default:
		xs->error = XS_DRIVER_STUFFUP;
		break;
	}

	if (xs->error != XS_NOERROR) {
		/* Don't clobber any existing error state. */
	} else if ((scb->flags & SCB_SENSE) != 0) {
		/*
		 * We performed autosense retrieval.
		 *
		 * Zero any sense not transferred by the
		 * device.  The SCSI spec mandates that any
		 * untransferred data should be assumed to be
		 * zero.  Complete the 'bounce' of sense information
		 * through buffers accessible via bus-space by
		 * copying it into the client's scsi_xfer.
		 */
		memset(&xs->sense, 0, sizeof(struct scsi_sense_data));
		memcpy(&xs->sense, ahc_get_sense_buf(ahc, scb),
		    aic_le32toh(scb->sg_list->len) & AHC_SG_LEN_MASK);
		xs->error = XS_SENSE;
	}

	s = splbio();
	ahc_free_scb(ahc, scb);
	scsi_done(xs);
	splx(s);
}

void
ahc_minphys(struct buf *bp, struct scsi_link *sl)
{
	/*
	 * Even though the card can transfer up to 16megs per command
	 * we are limited by the number of segments in the dma segment
	 * list that we can hold.  The worst case is that all pages are
	 * discontinuous physically, hence the "page per segment" limit
	 * enforced here.
	 */
	if (bp->b_bcount > ((AHC_NSEG - 1) * PAGE_SIZE)) {
		bp->b_bcount = ((AHC_NSEG - 1) * PAGE_SIZE);
	}
	minphys(bp);
}

void
ahc_action(struct scsi_xfer *xs)
{
	struct ahc_softc *ahc;
	struct scb *scb;
	struct hardware_scb *hscb;
	u_int target_id;
	u_int our_id;
	int s;

	SC_DEBUG(xs->sc_link, SDEV_DB3, ("ahc_action\n"));
	ahc = (struct ahc_softc *)xs->sc_link->adapter_softc;

	target_id = xs->sc_link->target;
	our_id = SCSI_SCSI_ID(ahc, xs->sc_link);

	/*
	 * get an scb to use.
	 */
	s = splbio();
	scb = ahc_get_scb(ahc);
	splx(s);
	if (scb == NULL) {
		xs->error = XS_NO_CCB;
		scsi_done(xs);
		return;
	}

	hscb = scb->hscb;

	SC_DEBUG(xs->sc_link, SDEV_DB3, ("start scb(%p)\n", scb));
	scb->xs = xs;
	timeout_set(&xs->stimeout, ahc_timeout, scb);

	/*
	 * Put all the arguments for the xfer in the scb
	 */
	hscb->control = 0;
	hscb->scsiid = BUILD_SCSIID(ahc, xs->sc_link, target_id, our_id);
	hscb->lun = xs->sc_link->lun;
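	/*
	 * A device reset is sent as a message-only SCB: no CDB is
	 * transferred, and MK_MESSAGE causes the reset message to be
	 * delivered instead.
	 */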
	if (xs->flags & SCSI_RESET) {
		hscb->cdb_len = 0;
		scb->flags |= SCB_DEVICE_RESET;
		hscb->control |= MK_MESSAGE;
		ahc_execute_scb(scb, NULL, 0);
		return;
	}

	ahc_setup_data(ahc, xs, scb);
}

void
ahc_execute_scb(void *arg, bus_dma_segment_t *dm_segs, int nsegments)
{
	struct	scb *scb;
	struct	scsi_xfer *xs;
	struct	ahc_softc *ahc;
	struct	ahc_initiator_tinfo *tinfo;
	struct	ahc_tmode_tstate *tstate;

	u_int	mask;
	int	s;

	scb = (struct scb *)arg;
	xs = scb->xs;
	xs->error = CAM_REQ_INPROG;
	xs->status = 0;
	ahc = (struct ahc_softc *)xs->sc_link->adapter_softc;

	if (nsegments != 0) {
		struct	  ahc_dma_seg *sg;
		bus_dma_segment_t *end_seg;
		int op;

		end_seg = dm_segs + nsegments;

		/* Copy the segments into our SG list */
		sg = scb->sg_list;
		while (dm_segs < end_seg) {
			uint32_t len;

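			/*
			 * The low 24 bits of the length word hold the
			 * segment length; the top byte carries address
			 * bits 32-38 for controllers that support
			 * 39-bit DMA addressing.
			 */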
			sg->addr = aic_htole32(dm_segs->ds_addr);
			len = dm_segs->ds_len
			    | ((dm_segs->ds_addr >> 8) & 0x7F000000);
			sg->len = aic_htole32(len);
			sg++;
			dm_segs++;
		}

		/*
		 * Note where to find the SG entries in bus space.
		 * We also set the full residual flag which the
		 * sequencer will clear as soon as a data transfer
		 * occurs.
		 */
		scb->hscb->sgptr = aic_htole32(scb->sg_list_phys|SG_FULL_RESID);

		if ((xs->flags & SCSI_DATA_IN) != 0)
			op = BUS_DMASYNC_PREREAD;
		else
			op = BUS_DMASYNC_PREWRITE;

		bus_dmamap_sync(ahc->parent_dmat, scb->dmamap, 0,
				scb->dmamap->dm_mapsize, op);

		sg--;
		sg->len |= aic_htole32(AHC_DMA_LAST_SEG);

		bus_dmamap_sync(ahc->parent_dmat, scb->sg_map->sg_dmamap,
		    0, scb->sg_map->sg_dmamap->dm_mapsize,
		    BUS_DMASYNC_PREWRITE);

		/* Copy the first SG into the "current" data pointer area */
		scb->hscb->dataptr = scb->sg_list->addr;
		scb->hscb->datacnt = scb->sg_list->len;
	} else {
		scb->hscb->sgptr = aic_htole32(SG_LIST_NULL);
		scb->hscb->dataptr = 0;
		scb->hscb->datacnt = 0;
	}

	scb->sg_count = nsegments;

	s = splbio();

	tinfo = ahc_fetch_transinfo(ahc, SCSIID_CHANNEL(ahc, scb->hscb->scsiid),
				    SCSIID_OUR_ID(scb->hscb->scsiid),
				    SCSIID_TARGET(ahc, scb->hscb->scsiid),
				    &tstate);

	mask = SCB_GET_TARGET_MASK(ahc, scb);
	scb->hscb->scsirate = tinfo->scsirate;
	scb->hscb->scsioffset = tinfo->curr.offset;

	if ((tstate->ultraenb & mask) != 0)
		scb->hscb->control |= ULTRAENB;

	if ((tstate->discenable & mask) != 0)
		scb->hscb->control |= DISCENB;

	if ((tstate->auto_negotiate & mask) != 0) {
		scb->flags |= SCB_AUTO_NEGOTIATE;
		scb->hscb->control |= MK_MESSAGE;
	}

	if ((tstate->tagenable & mask) != 0)
		scb->hscb->control |= TAG_ENB;

	bus_dmamap_sync(ahc->parent_dmat, ahc->scb_data->hscb_dmamap,
	    0, ahc->scb_data->hscb_dmamap->dm_mapsize,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	LIST_INSERT_HEAD(&ahc->pending_scbs, scb, pending_links);

	if (!(xs->flags & SCSI_POLL))
		timeout_add_msec(&xs->stimeout, xs->timeout);

	/*
	 * We only allow one untagged transaction
	 * per target in the initiator role unless
	 * we are storing a full busy target *lun*
	 * table in SCB space.
	 *
	 * This really should not be of any
	 * concern, as we take care to avoid this
	 * in ahc_done().  XXX smurph
	 */
	if ((scb->hscb->control & (TARGET_SCB|TAG_ENB)) == 0
	    && (ahc->flags & AHC_SCB_BTT) == 0) {
		struct scb_tailq *untagged_q;
		int target_offset;

		target_offset = SCB_GET_TARGET_OFFSET(ahc, scb);
		untagged_q = &(ahc->untagged_queues[target_offset]);
		TAILQ_INSERT_TAIL(untagged_q, scb, links.tqe);
		scb->flags |= SCB_UNTAGGEDQ;
		if (TAILQ_FIRST(untagged_q) != scb) {
			if (xs->flags & SCSI_POLL)
				goto poll;
			else {
				splx(s);
				return;
			}
		}
	}
	scb->flags |= SCB_ACTIVE;

	if ((scb->flags & SCB_TARGET_IMMEDIATE) != 0) {
		/* Define a mapping from our tag to the SCB. */
		ahc->scb_data->scbindex[scb->hscb->tag] = scb;
		ahc_pause(ahc);
		if ((ahc->flags & AHC_PAGESCBS) == 0)
			ahc_outb(ahc, SCBPTR, scb->hscb->tag);
		ahc_outb(ahc, TARG_IMMEDIATE_SCB, scb->hscb->tag);
		ahc_unpause(ahc);
	} else {
		ahc_queue_scb(ahc, scb);
	}

	if (!(xs->flags & SCSI_POLL)) {
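		/*
		 * On the first request to a target, establish our
		 * transfer negotiation goals and request negotiation
		 * if the goal is anything other than async/narrow.
		 */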
		if (ahc->inited_target[xs->sc_link->target] == 0) {
			struct	ahc_devinfo devinfo;

			ahc_adapter_req_set_xfer_mode(ahc, scb);
			ahc_scb_devinfo(ahc, &devinfo, scb);
			ahc_update_neg_request(ahc, &devinfo, tstate, tinfo,
			    AHC_NEG_IF_NON_ASYNC);

			ahc->inited_target[xs->sc_link->target] = 1;
		}
		splx(s);
		return;
	}

	/*
	 * If we can't use interrupts, poll for completion
	 */
poll:
	SC_DEBUG(xs->sc_link, SDEV_DB3, ("cmd_poll\n"));

	do {
		if (ahc_poll(ahc, xs->timeout)) {
			if (!(xs->flags & SCSI_SILENT))
				printf("cmd fail\n");
			ahc_timeout(scb);
			break;
		}
	} while (!(xs->flags & ITSDONE));

	splx(s);
}

int
ahc_poll(struct ahc_softc *ahc, int wait)
{
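	/* "wait" is in milliseconds; poll INTSTAT once per millisecond. */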
	while (--wait) {
		DELAY(1000);
		if (ahc_inb(ahc, INTSTAT) & INT_PEND)
			break;
	}

	if (wait == 0) {
		printf("%s: board is not responding\n", ahc_name(ahc));
		return (EIO);
	}

	ahc_intr((void *)ahc);
	return (0);
}

void
ahc_setup_data(struct ahc_softc *ahc, struct scsi_xfer *xs,
	       struct scb *scb)
{
	struct hardware_scb *hscb;
	int s;

	hscb = scb->hscb;
	xs->resid = xs->status = 0;
	xs->error = CAM_REQ_INPROG;

	hscb->cdb_len = xs->cmdlen;
	if (hscb->cdb_len > sizeof(hscb->cdb32)) {
		s = splbio();
		ahc_free_scb(ahc, scb);
		splx(s);
		xs->error = XS_DRIVER_STUFFUP;
		scsi_done(xs);
		return;
	}

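	/*
	 * CDBs longer than 12 bytes do not fit in the hardware SCB's
	 * shared data area, so they are fetched indirectly through a
	 * pointer (SCB_CDB32_PTR).
	 */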
	if (hscb->cdb_len > 12) {
		memcpy(hscb->cdb32, xs->cmd, hscb->cdb_len);
		scb->flags |= SCB_CDB32_PTR;
	} else {
		memcpy(hscb->shared_data.cdb, xs->cmd, hscb->cdb_len);
	}

	/* Only use S/G if there is a transfer */
	if (xs->datalen) {
		int error;

		error = bus_dmamap_load(ahc->parent_dmat,
					scb->dmamap, xs->data,
					xs->datalen, NULL,
					(xs->flags & SCSI_NOSLEEP) ?
					BUS_DMA_NOWAIT : BUS_DMA_WAITOK);
		if (error) {
#ifdef AHC_DEBUG
			printf("%s: in ahc_setup_data(): bus_dmamap_load() "
			       "= %d\n",
			       ahc_name(ahc), error);
#endif
			s = splbio();
			ahc_free_scb(ahc, scb);
			splx(s);
			xs->error = XS_DRIVER_STUFFUP;
			scsi_done(xs);
			return;
		}
		ahc_execute_scb(scb, scb->dmamap->dm_segs,
		    scb->dmamap->dm_nsegs);
	} else {
		ahc_execute_scb(scb, NULL, 0);
	}
}

void
ahc_timeout(void *arg)
{
	struct	scb *scb, *list_scb;
	struct	ahc_softc *ahc;
	int	s;
	int	found;
	char	channel;

	scb = (struct scb *)arg;
	ahc = (struct ahc_softc *)scb->xs->sc_link->adapter_softc;

	s = splbio();

#ifdef AHC_DEBUG
	printf("%s: SCB %d timed out\n", ahc_name(ahc), scb->hscb->tag);
	ahc_dump_card_state(ahc);
#endif

	ahc_pause(ahc);

	if (scb->flags & SCB_ACTIVE) {
		channel = SCB_GET_CHANNEL(ahc, scb);
		ahc_set_transaction_status(scb, CAM_CMD_TIMEOUT);
		/*
		 * Go through all of our pending SCBs and remove
		 * any scheduled timeouts for them. They're about to be
		 * aborted so no need for them to timeout.
		 */
		LIST_FOREACH(list_scb, &ahc->pending_scbs, pending_links) {
			if (list_scb->xs)
				timeout_del(&list_scb->xs->stimeout);
		}
		found = ahc_reset_channel(ahc, channel, /*Initiate Reset*/TRUE);
#ifdef AHC_DEBUG
		printf("%s: Issued Channel %c Bus Reset %d SCBs aborted\n",
		    ahc_name(ahc), channel, found);
#endif
	}

	ahc_unpause(ahc);
	splx(s);
}


void
ahc_platform_set_tags(struct ahc_softc *ahc,
		      struct ahc_devinfo *devinfo, int alg)
{
	struct ahc_tmode_tstate *tstate;

	ahc_fetch_transinfo(ahc, devinfo->channel, devinfo->our_scsiid,
			    devinfo->target, &tstate);

	/* XXXX Need to check quirks before doing this! XXXX */

	switch (alg) {
	case AHC_QUEUE_BASIC:
	case AHC_QUEUE_TAGGED:
		tstate->tagenable |= devinfo->target_mask;
		break;
	case AHC_QUEUE_NONE:
		tstate->tagenable &= ~devinfo->target_mask;
		break;
	}
}

int
ahc_platform_alloc(struct ahc_softc *ahc, void *platform_arg)
{
	if (sizeof(struct ahc_platform_data) > 0) {
		ahc->platform_data = malloc(sizeof(struct ahc_platform_data),
		    M_DEVBUF, M_NOWAIT | M_ZERO);
		if (ahc->platform_data == NULL)
			return (ENOMEM);
	}

	return (0);
}

void
ahc_platform_free(struct ahc_softc *ahc)
{
	if (sizeof(struct ahc_platform_data) > 0)
		free(ahc->platform_data, M_DEVBUF);
}

int
ahc_softc_comp(struct ahc_softc *lahc, struct ahc_softc *rahc)
{
	return (0);
}

void
ahc_send_async(struct ahc_softc *ahc, char channel, u_int target, u_int lun,
		ac_code code, void *opt_arg)
{
	/* Nothing to do here for OpenBSD */
}

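/*
 * Derive the transfer negotiation goals (bus width, sync period and
 * offset, PPR options, tagged queueing) for this SCB's target from
 * the user settings and the quirks recorded in its scsi_link.
 */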
void
ahc_adapter_req_set_xfer_mode(struct ahc_softc *ahc, struct scb *scb)
{
	struct ahc_initiator_tinfo *tinfo;
	struct ahc_tmode_tstate *tstate;
	struct ahc_syncrate *syncrate;
	struct ahc_devinfo devinfo;
	u_int16_t quirks;
	u_int width, ppr_options, period, offset;
	int s;

	s = splbio();

	ahc_scb_devinfo(ahc, &devinfo, scb);
	quirks = scb->xs->sc_link->quirks;
	tinfo = ahc_fetch_transinfo(ahc, devinfo.channel,
	    devinfo.our_scsiid, devinfo.target, &tstate);

	tstate->discenable |= (ahc->user_discenable & devinfo.target_mask);

	if (quirks & SDEV_NOTAGS)
		tstate->tagenable &= ~devinfo.target_mask;
	else if (ahc->user_tagenable & devinfo.target_mask)
		tstate->tagenable |= devinfo.target_mask;

	if (quirks & SDEV_NOWIDE)
		width = MSG_EXT_WDTR_BUS_8_BIT;
	else
		width = MSG_EXT_WDTR_BUS_16_BIT;

	ahc_validate_width(ahc, NULL, &width, ROLE_UNKNOWN);
	if (width > tinfo->user.width)
		width = tinfo->user.width;
	ahc_set_width(ahc, &devinfo, width, AHC_TRANS_GOAL, FALSE);

	if (quirks & SDEV_NOSYNC) {
		period = 0;
		offset = 0;
	} else {
		period = tinfo->user.period;
		offset = tinfo->user.offset;
	}

	/* XXX Look at saved INQUIRY flags for PPR capabilities XXX */
	ppr_options = tinfo->user.ppr_options;
	/* XXX Other reasons to avoid ppr? XXX */
	if (width < MSG_EXT_WDTR_BUS_16_BIT)
		ppr_options = 0;

	if ((tstate->discenable & devinfo.target_mask) == 0 ||
	    (tstate->tagenable & devinfo.target_mask) == 0)
		ppr_options &= ~MSG_EXT_PPR_PROT_IUS;

	syncrate = ahc_find_syncrate(ahc, &period, &ppr_options,
	    AHC_SYNCRATE_MAX);
	ahc_validate_offset(ahc, NULL, syncrate, &offset, width,
	    ROLE_UNKNOWN);

	if (offset == 0) {
		period = 0;
		ppr_options = 0;
	}

	if (ppr_options != 0 && tinfo->user.transport_version >= 3) {
		tinfo->goal.transport_version = tinfo->user.transport_version;
		tinfo->curr.transport_version = tinfo->user.transport_version;
	}

	ahc_set_syncrate(ahc, &devinfo, syncrate, period, offset, ppr_options,
	    AHC_TRANS_GOAL, FALSE);

	splx(s);
}