/*	$OpenBSD: aic7xxx_openbsd.c,v 1.55 2016/08/17 01:17:54 krw Exp $	*/
/*	$NetBSD: aic7xxx_osm.c,v 1.14 2003/11/02 11:07:44 wiz Exp $	*/

/*
 * Bus independent OpenBSD shim for the aic7xxx based adaptec SCSI controllers
 *
 * Copyright (c) 1994-2001 Justin T. Gibbs.
 * Copyright (c) 2001-2002 Steve Murphree, Jr.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions, and the following disclaimer,
 *    without modification.
 * 2. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * Alternatively, this software may be distributed under the terms of the
 * GNU Public License ("GPL").
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * //depot/aic7xxx/freebsd/dev/aic7xxx/aic7xxx_osm.c#12 $
 *
 * $FreeBSD: src/sys/dev/aic7xxx/aic7xxx_osm.c,v 1.31 2002/11/30 19:08:58 scottl Exp $
 */
/*
 * Ported from FreeBSD by Pascal Renauld, Network Storage Solutions, Inc. - April 2003
 */

#include <dev/ic/aic7xxx_openbsd.h>
#include <dev/ic/aic7xxx_inline.h>

#ifndef AHC_TMODE_ENABLE
#define AHC_TMODE_ENABLE 0
#endif


void	ahc_action(struct scsi_xfer *);
void	ahc_execute_scb(void *, bus_dma_segment_t *, int);
int	ahc_poll(struct ahc_softc *, int);
void	ahc_setup_data(struct ahc_softc *, struct scsi_xfer *, struct scb *);

void	ahc_minphys(struct buf *, struct scsi_link *);
void	ahc_adapter_req_set_xfer_mode(struct ahc_softc *, struct scb *);


struct cfdriver ahc_cd = {
	NULL, "ahc", DV_DULL
};

static struct scsi_adapter ahc_switch =
{
	ahc_action,
	ahc_minphys,
	0,
	0,
};

/*
 * Attach all the sub-devices we can find
 */
int
ahc_attach(struct ahc_softc *ahc)
{
	struct scsibus_attach_args saa;
	int s;

	s = splbio();

	/*
	 * fill in the prototype scsi_links.
	 */
	ahc->sc_channel.adapter_target = ahc->our_id;
	if (ahc->features & AHC_WIDE)
		ahc->sc_channel.adapter_buswidth = 16;
	ahc->sc_channel.adapter_softc = ahc;
	ahc->sc_channel.adapter = &ahc_switch;
	ahc->sc_channel.openings = 16;
	ahc->sc_channel.pool = &ahc->sc_iopool;

	if (ahc->features & AHC_TWIN) {
		/* Configure the second scsi bus */
		ahc->sc_channel_b = ahc->sc_channel;
		ahc->sc_channel_b.adapter_target = ahc->our_id_b;
	}

#ifndef DEBUG
	if (bootverbose) {
		char ahc_info[256];
		ahc_controller_info(ahc, ahc_info, sizeof ahc_info);
		printf("%s: %s\n", ahc->sc_dev.dv_xname, ahc_info);
	}
#endif

	ahc_intr_enable(ahc, TRUE);

	if (ahc->flags & AHC_RESET_BUS_A)
		ahc_reset_channel(ahc, 'A', TRUE);
	if ((ahc->features & AHC_TWIN) && (ahc->flags & AHC_RESET_BUS_B))
		ahc_reset_channel(ahc, 'B', TRUE);

	bzero(&saa, sizeof(saa));
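	/*
	 * Attach a scsibus for each channel.  When channel B is the
	 * primary channel, the buses are attached in the opposite
	 * order so that the primary channel is probed first.
	 */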
	if ((ahc->flags & AHC_PRIMARY_CHANNEL) == 0) {
		saa.saa_sc_link = &ahc->sc_channel;
		ahc->sc_child = config_found((void *)&ahc->sc_dev,
		    &saa, scsiprint);
		if (ahc->features & AHC_TWIN) {
			saa.saa_sc_link = &ahc->sc_channel_b;
			ahc->sc_child_b = config_found((void *)&ahc->sc_dev,
			    &saa, scsiprint);
		}
	} else {
		if (ahc->features & AHC_TWIN) {
			saa.saa_sc_link = &ahc->sc_channel_b;
			ahc->sc_child = config_found((void *)&ahc->sc_dev,
			    &saa, scsiprint);
		}
		saa.saa_sc_link = &ahc->sc_channel;
		ahc->sc_child_b = config_found((void *)&ahc->sc_dev,
		    &saa, scsiprint);
	}

	splx(s);
	return (1);
}

/*
 * Catch an interrupt from the adapter
 */
int
ahc_platform_intr(void *arg)
{
	struct	ahc_softc *ahc = (struct ahc_softc *)arg;

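	/*
	 * Sync the hardware SCB array so that any completion status the
	 * sequencer has posted is visible to the CPU before ahc_intr()
	 * examines it.
	 */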
	bus_dmamap_sync(ahc->parent_dmat, ahc->scb_data->hscb_dmamap,
	    0, ahc->scb_data->hscb_dmamap->dm_mapsize,
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

	return ahc_intr(ahc);
}

/*
 * We have an scb which has been processed by the
 * adaptor, now we look to see how the operation
 * went.
 */
void
ahc_done(struct ahc_softc *ahc, struct scb *scb)
{
	struct scsi_xfer *xs = scb->xs;

	bus_dmamap_sync(ahc->parent_dmat, ahc->scb_data->hscb_dmamap,
	    0, ahc->scb_data->hscb_dmamap->dm_mapsize,
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

	LIST_REMOVE(scb, pending_links);
	if ((scb->flags & SCB_UNTAGGEDQ) != 0) {
		struct scb_tailq *untagged_q;
		int target_offset;

		target_offset = SCB_GET_TARGET_OFFSET(ahc, scb);
		untagged_q = &ahc->untagged_queues[target_offset];
		TAILQ_REMOVE(untagged_q, scb, links.tqe);
		scb->flags &= ~SCB_UNTAGGEDQ;
		ahc_run_untagged_queue(ahc, untagged_q);
	}

	timeout_del(&xs->stimeout);

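	/* If the command moved data, sync and unload its DMA map. */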
	if (xs->datalen) {
		int op;

		if ((xs->flags & SCSI_DATA_IN) != 0)
			op = BUS_DMASYNC_POSTREAD;
		else
			op = BUS_DMASYNC_POSTWRITE;
		bus_dmamap_sync(ahc->parent_dmat, scb->dmamap, 0,
				scb->dmamap->dm_mapsize, op);
		bus_dmamap_unload(ahc->parent_dmat, scb->dmamap);
	}

	/* Translate the CAM status code to a SCSI error code. */
	switch (xs->error) {
	case CAM_SCSI_STATUS_ERROR:
	case CAM_REQ_INPROG:
	case CAM_REQ_CMP:
		switch (xs->status) {
		case SCSI_TASKSET_FULL:
		case SCSI_BUSY:
			xs->error = XS_BUSY;
			break;
		case SCSI_CHECK:
		case SCSI_TERMINATED:
			if ((scb->flags & SCB_SENSE) == 0) {
				/* CHECK on CHECK? */
				xs->error = XS_DRIVER_STUFFUP;
			} else
				xs->error = XS_NOERROR;
			break;
		default:
			xs->error = XS_NOERROR;
			break;
		}
		break;
	case CAM_REQUEUE_REQ:
	case CAM_BUSY:
		xs->error = XS_BUSY;
		break;
	case CAM_CMD_TIMEOUT:
		xs->error = XS_TIMEOUT;
		break;
	case CAM_BDR_SENT:
	case CAM_SCSI_BUS_RESET:
		xs->error = XS_RESET;
		break;
	case CAM_SEL_TIMEOUT:
		xs->error = XS_SELTIMEOUT;
		break;
	default:
		xs->error = XS_DRIVER_STUFFUP;
		break;
	}

	if (xs->error != XS_NOERROR) {
		/* Don't clobber any existing error state */
	} else if ((scb->flags & SCB_SENSE) != 0) {
		/*
		 * We performed autosense retrieval.
		 *
		 * Zero any sense not transferred by the
		 * device.  The SCSI spec mandates that any
		 * untransferred data should be assumed to be
		 * zero.  Complete the 'bounce' of sense information
		 * through buffers accessible via bus-space by
		 * copying it into the clients csio.
		 */
		memset(&xs->sense, 0, sizeof(struct scsi_sense_data));
		memcpy(&xs->sense, ahc_get_sense_buf(ahc, scb),
		    aic_le32toh(scb->sg_list->len) & AHC_SG_LEN_MASK);
		xs->error = XS_SENSE;
	}

	scsi_done(xs);
}

void
ahc_minphys(struct buf *bp, struct scsi_link *sl)
{
	/*
	 * Even though the card can transfer up to 16 megabytes per command
	 * we are limited by the number of segments in the DMA segment
	 * list that we can hold.  The worst case is that all pages are
	 * discontinuous physically, hence the "page per segment" limit
	 * enforced here.
	 */
	if (bp->b_bcount > ((AHC_NSEG - 1) * PAGE_SIZE)) {
		bp->b_bcount = ((AHC_NSEG - 1) * PAGE_SIZE);
	}
	minphys(bp);
}

void
ahc_action(struct scsi_xfer *xs)
{
	struct ahc_softc *ahc;
	struct scb *scb;
	struct hardware_scb *hscb;
	u_int target_id;
	u_int our_id;

	SC_DEBUG(xs->sc_link, SDEV_DB3, ("ahc_action\n"));
	ahc = (struct ahc_softc *)xs->sc_link->adapter_softc;

	target_id = xs->sc_link->target;
	our_id = SCSI_SCSI_ID(ahc, xs->sc_link);

	/*
	 * get the scb to use.
	 */
	scb = xs->io;

	/* Clean up for the next user */
	scb->flags = SCB_FLAG_NONE;

	hscb = scb->hscb;
	hscb->control = 0;
	ahc->scb_data->scbindex[hscb->tag] = NULL;

	SC_DEBUG(xs->sc_link, SDEV_DB3, ("start scb(%p)\n", scb));
	scb->xs = xs;
	timeout_set(&xs->stimeout, ahc_timeout, scb);

	/*
	 * Put all the arguments for the xfer in the scb
	 */
	hscb->control = 0;
	hscb->scsiid = BUILD_SCSIID(ahc, xs->sc_link, target_id, our_id);
	hscb->lun = xs->sc_link->lun;
	if (xs->flags & SCSI_RESET) {
		hscb->cdb_len = 0;
		scb->flags |= SCB_DEVICE_RESET;
		hscb->control |= MK_MESSAGE;
		ahc_execute_scb(scb, NULL, 0);
		return;
	}

	ahc_setup_data(ahc, xs, scb);
}

void
ahc_execute_scb(void *arg, bus_dma_segment_t *dm_segs, int nsegments)
{
	struct	scb *scb;
	struct	scsi_xfer *xs;
	struct	ahc_softc *ahc;
	struct	ahc_initiator_tinfo *tinfo;
	struct	ahc_tmode_tstate *tstate;

	u_int	mask;
	int	s;

	scb = (struct scb *)arg;
	xs = scb->xs;
	xs->error = CAM_REQ_INPROG;
	xs->status = 0;
	ahc = (struct ahc_softc *)xs->sc_link->adapter_softc;

	if (nsegments != 0) {
		struct	ahc_dma_seg *sg;
		bus_dma_segment_t *end_seg;
		int op;

		end_seg = dm_segs + nsegments;

		/* Copy the segments into our SG list */
		sg = scb->sg_list;
		while (dm_segs < end_seg) {
			uint32_t len;

			sg->addr = aic_htole32(dm_segs->ds_addr);
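			/*
			 * With 39-bit addressing, address bits above 31
			 * are carried in the high bits of the length
			 * field.
			 */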
			len = dm_segs->ds_len
			    | ((dm_segs->ds_addr >> 8) & 0x7F000000);
			sg->len = aic_htole32(len);
			sg++;
			dm_segs++;
		}

		/*
		 * Note where to find the SG entries in bus space.
		 * We also set the full residual flag which the
		 * sequencer will clear as soon as a data transfer
		 * occurs.
		 */
		scb->hscb->sgptr = aic_htole32(scb->sg_list_phys|SG_FULL_RESID);

		if ((xs->flags & SCSI_DATA_IN) != 0)
			op = BUS_DMASYNC_PREREAD;
		else
			op = BUS_DMASYNC_PREWRITE;

		bus_dmamap_sync(ahc->parent_dmat, scb->dmamap, 0,
				scb->dmamap->dm_mapsize, op);

		sg--;
		sg->len |= aic_htole32(AHC_DMA_LAST_SEG);

		bus_dmamap_sync(ahc->parent_dmat, scb->sg_map->sg_dmamap,
		    0, scb->sg_map->sg_dmamap->dm_mapsize,
		    BUS_DMASYNC_PREWRITE);

		/* Copy the first SG into the "current" data pointer area */
		scb->hscb->dataptr = scb->sg_list->addr;
		scb->hscb->datacnt = scb->sg_list->len;
	} else {
		scb->hscb->sgptr = aic_htole32(SG_LIST_NULL);
		scb->hscb->dataptr = 0;
		scb->hscb->datacnt = 0;
	}

	scb->sg_count = nsegments;

	s = splbio();

	tinfo = ahc_fetch_transinfo(ahc, SCSIID_CHANNEL(ahc, scb->hscb->scsiid),
				    SCSIID_OUR_ID(scb->hscb->scsiid),
				    SCSIID_TARGET(ahc, scb->hscb->scsiid),
				    &tstate);

	mask = SCB_GET_TARGET_MASK(ahc, scb);
	scb->hscb->scsirate = tinfo->scsirate;
	scb->hscb->scsioffset = tinfo->curr.offset;

	if ((tstate->ultraenb & mask) != 0)
		scb->hscb->control |= ULTRAENB;

	if ((tstate->discenable & mask) != 0)
		scb->hscb->control |= DISCENB;

	if ((tstate->auto_negotiate & mask) != 0) {
		scb->flags |= SCB_AUTO_NEGOTIATE;
		scb->hscb->control |= MK_MESSAGE;
	}

	if ((tstate->tagenable & mask) != 0)
		scb->hscb->control |= TAG_ENB;

	bus_dmamap_sync(ahc->parent_dmat, ahc->scb_data->hscb_dmamap,
	    0, ahc->scb_data->hscb_dmamap->dm_mapsize,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	LIST_INSERT_HEAD(&ahc->pending_scbs, scb, pending_links);

	if (!(xs->flags & SCSI_POLL))
		timeout_add_msec(&xs->stimeout, xs->timeout);

	/*
	 * We only allow one untagged transaction
	 * per target in the initiator role unless
	 * we are storing a full busy target *lun*
	 * table in SCB space.
	 *
	 * This really should not be of any
	 * concern, as we take care to avoid this
	 * in ahc_done().  XXX smurph
	 */
	if ((scb->hscb->control & (TARGET_SCB|TAG_ENB)) == 0
	    && (ahc->flags & AHC_SCB_BTT) == 0) {
		struct scb_tailq *untagged_q;
		int target_offset;

		target_offset = SCB_GET_TARGET_OFFSET(ahc, scb);
		untagged_q = &(ahc->untagged_queues[target_offset]);
		TAILQ_INSERT_TAIL(untagged_q, scb, links.tqe);
		scb->flags |= SCB_UNTAGGEDQ;
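		/*
		 * If another untagged command is already outstanding for
		 * this target, leave this SCB on the queue; it will be
		 * started by ahc_run_untagged_queue() when its turn comes.
		 */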
		if (TAILQ_FIRST(untagged_q) != scb) {
			if (xs->flags & SCSI_POLL)
				goto poll;
			else {
				splx(s);
				return;
			}
		}
	}
	scb->flags |= SCB_ACTIVE;

	if ((scb->flags & SCB_TARGET_IMMEDIATE) != 0) {
		/* Define a mapping from our tag to the SCB. */
		ahc->scb_data->scbindex[scb->hscb->tag] = scb;
		ahc_pause(ahc);
		if ((ahc->flags & AHC_PAGESCBS) == 0)
			ahc_outb(ahc, SCBPTR, scb->hscb->tag);
		ahc_outb(ahc, TARG_IMMEDIATE_SCB, scb->hscb->tag);
		ahc_unpause(ahc);
	} else {
		ahc_queue_scb(ahc, scb);
	}

	if (!(xs->flags & SCSI_POLL)) {
		if (ahc->inited_target[xs->sc_link->target] == 0) {
			struct	ahc_devinfo devinfo;

			ahc_adapter_req_set_xfer_mode(ahc, scb);
			ahc_scb_devinfo(ahc, &devinfo, scb);
			ahc_update_neg_request(ahc, &devinfo, tstate, tinfo,
			    AHC_NEG_IF_NON_ASYNC);

			ahc->inited_target[xs->sc_link->target] = 1;
		}
		splx(s);
		return;
	}

	/*
	 * If we can't use interrupts, poll for completion
	 */
poll:
	SC_DEBUG(xs->sc_link, SDEV_DB3, ("cmd_poll\n"));

	do {
		if (ahc_poll(ahc, xs->timeout)) {
			if (!(xs->flags & SCSI_SILENT))
				printf("cmd fail\n");
			ahc_timeout(scb);
			break;
		}
	} while (!(xs->flags & ITSDONE));

	splx(s);
}

int
ahc_poll(struct ahc_softc *ahc, int wait)
{
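	/* "wait" is in milliseconds; each loop iteration delays for 1ms. */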
	while (--wait) {
		DELAY(1000);
		if (ahc_inb(ahc, INTSTAT) & INT_PEND)
			break;
	}

	if (wait == 0) {
		printf("%s: board is not responding\n", ahc_name(ahc));
		return (EIO);
	}

	ahc_intr((void *)ahc);
	return (0);
}

void
ahc_setup_data(struct ahc_softc *ahc, struct scsi_xfer *xs,
	       struct scb *scb)
{
	struct hardware_scb *hscb;

	hscb = scb->hscb;
	xs->resid = xs->status = 0;
	xs->error = CAM_REQ_INPROG;

	hscb->cdb_len = xs->cmdlen;
	if (hscb->cdb_len > sizeof(hscb->cdb32)) {
		xs->error = XS_DRIVER_STUFFUP;
		scsi_done(xs);
		return;
	}

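	/*
	 * CDBs of 12 bytes or less fit directly in the hardware SCB's
	 * shared data area.  Larger CDBs are kept in the cdb32 array and
	 * the SCB is flagged so the sequencer is handed a pointer to it.
	 */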
	if (hscb->cdb_len > 12) {
		memcpy(hscb->cdb32, xs->cmd, hscb->cdb_len);
		scb->flags |= SCB_CDB32_PTR;
	} else {
		memcpy(hscb->shared_data.cdb, xs->cmd, hscb->cdb_len);
	}

	/* Only use S/G if there is a transfer */
	if (xs->datalen) {
		int error;

		error = bus_dmamap_load(ahc->parent_dmat,
					scb->dmamap, xs->data,
					xs->datalen, NULL,
					(xs->flags & SCSI_NOSLEEP) ?
					BUS_DMA_NOWAIT : BUS_DMA_WAITOK);
		if (error) {
#ifdef AHC_DEBUG
			printf("%s: in ahc_setup_data(): bus_dmamap_load() "
			       "= %d\n",
			       ahc_name(ahc), error);
#endif
			xs->error = XS_DRIVER_STUFFUP;
			scsi_done(xs);
			return;
		}
		ahc_execute_scb(scb, scb->dmamap->dm_segs,
		    scb->dmamap->dm_nsegs);
	} else {
		ahc_execute_scb(scb, NULL, 0);
	}
}

void
ahc_timeout(void *arg)
{
	struct	scb *scb, *list_scb;
	struct	ahc_softc *ahc;
	int	s;
	int	found;
	char	channel;

	scb = (struct scb *)arg;
	ahc = (struct ahc_softc *)scb->xs->sc_link->adapter_softc;

	s = splbio();

#ifdef AHC_DEBUG
	printf("%s: SCB %d timed out\n", ahc_name(ahc), scb->hscb->tag);
	ahc_dump_card_state(ahc);
#endif

	ahc_pause(ahc);

	if (scb->flags & SCB_ACTIVE) {
		channel = SCB_GET_CHANNEL(ahc, scb);
		ahc_set_transaction_status(scb, CAM_CMD_TIMEOUT);
		/*
		 * Go through all of our pending SCBs and remove
		 * any scheduled timeouts for them. They're about to be
		 * aborted so no need for them to timeout.
		 */
		LIST_FOREACH(list_scb, &ahc->pending_scbs, pending_links) {
			if (list_scb->xs)
				timeout_del(&list_scb->xs->stimeout);
		}
		found = ahc_reset_channel(ahc, channel, /*Initiate Reset*/TRUE);
#ifdef AHC_DEBUG
		printf("%s: Issued Channel %c Bus Reset %d SCBs aborted\n",
		    ahc_name(ahc), channel, found);
#endif
	}

	ahc_unpause(ahc);
	splx(s);
}

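/*
 * Enable or disable tagged queuing for the target described by devinfo.
 */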
void
ahc_platform_set_tags(struct ahc_softc *ahc,
		      struct ahc_devinfo *devinfo, int alg)
{
	struct ahc_tmode_tstate *tstate;

	ahc_fetch_transinfo(ahc, devinfo->channel, devinfo->our_scsiid,
			    devinfo->target, &tstate);

	/* XXXX Need to check quirks before doing this! XXXX */

	switch (alg) {
	case AHC_QUEUE_BASIC:
	case AHC_QUEUE_TAGGED:
		tstate->tagenable |= devinfo->target_mask;
		break;
	case AHC_QUEUE_NONE:
		tstate->tagenable &= ~devinfo->target_mask;
		break;
	}
}

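/*
 * Allocate and zero the OpenBSD-specific per-controller data.
 */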
int
ahc_platform_alloc(struct ahc_softc *ahc, void *platform_arg)
{
	if (sizeof(struct ahc_platform_data) > 0) {
		ahc->platform_data = malloc(sizeof(struct ahc_platform_data),
		    M_DEVBUF, M_NOWAIT | M_ZERO);
		if (ahc->platform_data == NULL)
			return (ENOMEM);
	}

	return (0);
}

void
ahc_platform_free(struct ahc_softc *ahc)
{
	if (sizeof(struct ahc_platform_data) > 0)
		free(ahc->platform_data, M_DEVBUF, 0);
}

int
ahc_softc_comp(struct ahc_softc *lahc, struct ahc_softc *rahc)
{
	return (0);
}

void
ahc_send_async(struct ahc_softc *ahc, char channel, u_int target, u_int lun,
		ac_code code, void *opt_arg)
{
	/* Nothing to do here for OpenBSD */
}

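/*
 * Establish the initial transfer negotiation goals (bus width, sync
 * period and offset, PPR options, tagged queuing) for the target
 * addressed by this SCB, based on the user settings and any quirks
 * recorded for the device.
 */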
void
ahc_adapter_req_set_xfer_mode(struct ahc_softc *ahc, struct scb *scb)
{
	struct ahc_initiator_tinfo *tinfo;
	struct ahc_tmode_tstate *tstate;
	struct ahc_syncrate *syncrate;
	struct ahc_devinfo devinfo;
	u_int16_t quirks;
	u_int width, ppr_options, period, offset;
	int s;

	s = splbio();

	ahc_scb_devinfo(ahc, &devinfo, scb);
	quirks = scb->xs->sc_link->quirks;
	tinfo = ahc_fetch_transinfo(ahc, devinfo.channel,
	    devinfo.our_scsiid, devinfo.target, &tstate);

	tstate->discenable |= (ahc->user_discenable & devinfo.target_mask);

	if (quirks & SDEV_NOTAGS)
		tstate->tagenable &= ~devinfo.target_mask;
	else if (ahc->user_tagenable & devinfo.target_mask)
		tstate->tagenable |= devinfo.target_mask;

	if (quirks & SDEV_NOWIDE)
		width = MSG_EXT_WDTR_BUS_8_BIT;
	else
		width = MSG_EXT_WDTR_BUS_16_BIT;

	ahc_validate_width(ahc, NULL, &width, ROLE_UNKNOWN);
	if (width > tinfo->user.width)
		width = tinfo->user.width;
	ahc_set_width(ahc, &devinfo, width, AHC_TRANS_GOAL, FALSE);

	if (quirks & SDEV_NOSYNC) {
		period = 0;
		offset = 0;
	} else {
		period = tinfo->user.period;
		offset = tinfo->user.offset;
	}

	/* XXX Look at saved INQUIRY flags for PPR capabilities XXX */
	ppr_options = tinfo->user.ppr_options;
	/* XXX Other reasons to avoid ppr? XXX */
	if (width < MSG_EXT_WDTR_BUS_16_BIT)
		ppr_options = 0;

	if ((tstate->discenable & devinfo.target_mask) == 0 ||
	    (tstate->tagenable & devinfo.target_mask) == 0)
		ppr_options &= ~MSG_EXT_PPR_PROT_IUS;

	syncrate = ahc_find_syncrate(ahc, &period, &ppr_options,
	    AHC_SYNCRATE_MAX);
	ahc_validate_offset(ahc, NULL, syncrate, &offset, width,
	    ROLE_UNKNOWN);

	if (offset == 0) {
		period = 0;
		ppr_options = 0;
	}

	if (ppr_options != 0 && tinfo->user.transport_version >= 3) {
		tinfo->goal.transport_version = tinfo->user.transport_version;
		tinfo->curr.transport_version = tinfo->user.transport_version;
	}

	ahc_set_syncrate(ahc, &devinfo, syncrate, period, offset, ppr_options,
	    AHC_TRANS_GOAL, FALSE);

	splx(s);
}

/*
 * Get a free scb from the free list, or NULL if none are available.
 */
void *
ahc_scb_alloc(void *xahc)
{
	struct ahc_softc *ahc = xahc;
	struct scb *scb;

	mtx_enter(&ahc->sc_scb_mtx);
	scb = SLIST_FIRST(&ahc->scb_data->free_scbs);

	if (scb != NULL)
		SLIST_REMOVE_HEAD(&ahc->scb_data->free_scbs, links.sle);

	mtx_leave(&ahc->sc_scb_mtx);

	return (scb);
}

/*
 * Return an SCB resource to the free list.
 */
void
ahc_scb_free(void *xahc, void *io)
{
	struct ahc_softc *ahc = xahc;
	struct scb *scb = io;
	struct hardware_scb *hscb;

	hscb = scb->hscb;
	/* Clean up for the next user */
	ahc->scb_data->scbindex[hscb->tag] = NULL;
	scb->flags = SCB_FLAG_NONE;
	hscb->control = 0;

	mtx_enter(&ahc->sc_scb_mtx);
	SLIST_INSERT_HEAD(&ahc->scb_data->free_scbs, scb, links.sle);
	mtx_leave(&ahc->sc_scb_mtx);

	/* Notify the OSM that a resource is now available. */
	ahc_platform_scb_free(ahc, scb);
}
790