1 /*	$NetBSD: aic79xx_osm.c,v 1.20 2008/06/24 10:07:40 gmcgarry Exp $	*/
2 
3 /*
4  * Bus independent NetBSD shim for the aic79xx based Adaptec SCSI controllers
5  *
6  * Copyright (c) 1994-2002 Justin T. Gibbs.
7  * Copyright (c) 2001-2002 Adaptec Inc.
8  * All rights reserved.
9  *
10  * Redistribution and use in source and binary forms, with or without
11  * modification, are permitted provided that the following conditions
12  * are met:
13  * 1. Redistributions of source code must retain the above copyright
14  *    notice, this list of conditions, and the following disclaimer,
15  *    without modification.
16  * 2. The name of the author may not be used to endorse or promote products
17  *    derived from this software without specific prior written permission.
18  *
19  * Alternatively, this software may be distributed under the terms of the
20  * GNU Public License ("GPL").
21  *
22  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
23  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
24  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
25  * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
26  * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
27  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
28  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
29  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
30  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
31  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
32  * SUCH DAMAGE.
33  *
34  * //depot/aic7xxx/freebsd/dev/aic7xxx/aic79xx_osm.c#26 $
35  *
36  * $FreeBSD: src/sys/dev/aic7xxx/aic79xx_osm.c,v 1.11 2003/05/04 00:20:07 gibbs Exp $
37  */
38 /*
39  * Ported from FreeBSD by Pascal Renauld, Network Storage Solutions, Inc.
40  * - April 2003
41  */
42 
43 #include <sys/cdefs.h>
44 __KERNEL_RCSID(0, "$NetBSD: aic79xx_osm.c,v 1.20 2008/06/24 10:07:40 gmcgarry Exp $");
45 
46 #include <dev/ic/aic79xx_osm.h>
47 #include <dev/ic/aic79xx_inline.h>
48 
49 #ifndef AHD_TMODE_ENABLE
50 #define AHD_TMODE_ENABLE 0
51 #endif
52 
53 static int	ahd_ioctl(struct scsipi_channel *channel, u_long cmd,
54 			  void *addr, int flag, struct proc *p);
55 static void	ahd_action(struct scsipi_channel *chan,
56 			   scsipi_adapter_req_t req, void *arg);
57 static void	ahd_execute_scb(void *arg, bus_dma_segment_t *dm_segs,
58 				int nsegments);
59 static int	ahd_poll(struct ahd_softc *ahd, int wait);
60 static void	ahd_setup_data(struct ahd_softc *ahd, struct scsipi_xfer *xs,
61 			       struct scb *scb);
62 
63 #if NOT_YET
64 static void	ahd_set_recoveryscb(struct ahd_softc *ahd, struct scb *scb);
65 #endif
66 
67 /*
68  * Attach all the sub-devices we can find
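 *
 * Fill in the scsipi adapter and channel glue, configure the child
 * scsibus via config_found(), enable interrupts and, if requested
 * via AHD_RESET_BUS_A, reset the bus.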
69  */
70 int
71 ahd_attach(struct ahd_softc *ahd)
72 {
73 	int 	s;
74 	char	ahd_info[256];
75 
76 	ahd_controller_info(ahd, ahd_info, sizeof(ahd_info));
77         printf("%s: %s\n", device_xname(&ahd->sc_dev), ahd_info);
78 
79         ahd_lock(ahd, &s);
80 
81 	ahd->sc_adapter.adapt_dev = &ahd->sc_dev;
82 	ahd->sc_adapter.adapt_nchannels = 1;
83 
84 	ahd->sc_adapter.adapt_openings = ahd->scb_data.numscbs - 1;
85 	ahd->sc_adapter.adapt_max_periph = 32;
86 
87 	ahd->sc_adapter.adapt_ioctl = ahd_ioctl;
88 	ahd->sc_adapter.adapt_minphys = ahd_minphys;
89 	ahd->sc_adapter.adapt_request = ahd_action;
90 
91 	ahd->sc_channel.chan_adapter = &ahd->sc_adapter;
92         ahd->sc_channel.chan_bustype = &scsi_bustype;
93         ahd->sc_channel.chan_channel = 0;
94         ahd->sc_channel.chan_ntargets = AHD_NUM_TARGETS;
95         ahd->sc_channel.chan_nluns = 8 /*AHD_NUM_LUNS*/;
96         ahd->sc_channel.chan_id = ahd->our_id;
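        /*
         * SCSIPI_CHAN_CANGROW lets the midlayer ask for more openings;
         * the ADAPTER_REQ_GROW_RESOURCES case in ahd_action() below
         * allocates additional SCBs until AHD_SCB_MAX_ALLOC is reached.
         */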
97         ahd->sc_channel.chan_flags |= SCSIPI_CHAN_CANGROW;
98 
99 	ahd->sc_child = config_found((void *)ahd, &ahd->sc_channel, scsiprint);
100 
101 	ahd_intr_enable(ahd, TRUE);
102 
103 	if (ahd->flags & AHD_RESET_BUS_A)
104 		ahd_reset_channel(ahd, 'A', TRUE);
105 
106         ahd_unlock(ahd, &s);
107 
108 	return (1);
109 }
110 
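/*
 * Handle scsipi ioctls.  Only SCBUSIORESET is supported; it forces
 * a hard reset of the selected channel.
 */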
111 static int
112 ahd_ioctl(struct scsipi_channel *channel, u_long cmd,
113 	  void *addr, int flag, struct proc *p)
114 {
115         struct ahd_softc *ahd = (void *)channel->chan_adapter->adapt_dev;
116         int s, ret = ENOTTY;
117 
118         switch (cmd) {
119         case SCBUSIORESET:
120                 s = splbio();
121                 ahd_reset_channel(ahd, channel->chan_channel == 1 ? 'B' : 'A', TRUE);
122                 splx(s);
123                 ret = 0;
124                 break;
125         default:
126                 break;
127         }
128 
129         return ret;
130 }
131 
132 /*
133  * Catch an interrupt from the adapter
134  */
135 void
136 ahd_platform_intr(void *arg)
137 {
138 	struct	ahd_softc *ahd;
139 
140 	ahd = (struct ahd_softc *)arg;
141 
142 	printf("%s: ahd_platform_intr\n", ahd_name(ahd));
143 
144 	ahd_intr(ahd);
145 }
146 
147 /*
148  * We have an scb which has been processed by the
149  * adaptor; now we look to see how the operation went.
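 *
 * Stop the watchdog, sync and unload the data DMA map, bounce any
 * sense data back to the scsipi layer, release the SCB and hand the
 * transfer back via scsipi_done().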
150  */
151 void
152 ahd_done(struct ahd_softc *ahd, struct scb *scb)
153 {
154 	struct scsipi_xfer	*xs;
155 	struct scsipi_periph	*periph;
156 	int			s;
157 
158 	LIST_REMOVE(scb, pending_links);
159 
160 	xs = scb->xs;
161 	periph = xs->xs_periph;
162 
163 	callout_stop(&scb->xs->xs_callout);
164 
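	/*
	 * If data was transferred, sync and unload the DMA map now
	 * that the hardware is done with it.
	 */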
165 	if (xs->datalen) {
166 		int op;
167 
168 		if (xs->xs_control & XS_CTL_DATA_IN)
169 		  op = BUS_DMASYNC_POSTREAD;
170 		else
171 		  op = BUS_DMASYNC_POSTWRITE;
172 
173 		bus_dmamap_sync(ahd->parent_dmat, scb->dmamap, 0,
174 				scb->dmamap->dm_mapsize, op);
175                 bus_dmamap_unload(ahd->parent_dmat, scb->dmamap);
176         }
177 
178 	/*
179 	 * If the recovery SCB completes, we have to be
180 	 * out of our timeout.
181 	 */
182 	if ((scb->flags & SCB_RECOVERY_SCB) != 0) {
183 		struct	scb *list_scb;
184 
185 		/*
186 		 * We were able to complete the command successfully,
187 		 * so reinstate the timeouts for all other pending
188 		 * commands.
189 		 */
190 		LIST_FOREACH(list_scb, &ahd->pending_scbs, pending_links) {
191 			struct scsipi_xfer	*txs = list_scb->xs;
192 
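			/*
			 * xs->timeout is in milliseconds; convert it to
			 * ticks, dividing first for large values to avoid
			 * 32-bit overflow:
			 *   timeout > 1000000 ms: ticks = (timeout / 1000) * hz
			 *   otherwise:            ticks = (timeout * hz) / 1000
			 */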
193 			if (!(txs->xs_control & XS_CTL_POLL)) {
194                                 callout_reset(&txs->xs_callout,
195                                     (txs->timeout > 1000000) ?
196                                     (txs->timeout / 1000) * hz :
197                                     (txs->timeout * hz) / 1000,
198                                     ahd_timeout, list_scb);
199                         }
200 		}
201 
202 		if (ahd_get_transaction_status(scb) != XS_NOERROR)
203 		  ahd_set_transaction_status(scb, XS_TIMEOUT);
204                 scsipi_printaddr(xs->xs_periph);
205 		printf("%s: no longer in timeout, status = %x\n",
206 		       ahd_name(ahd), xs->status);
207 	}
208 
209 	if (xs->error != XS_NOERROR) {
210                 /* Don't clobber any existing error state */
211 	} else if ((xs->status == SCSI_STATUS_BUSY) ||
212 		   (xs->status == SCSI_STATUS_QUEUE_FULL)) {
213 	  	ahd_set_transaction_status(scb, XS_BUSY);
214 		printf("%s: drive (ID %d, LUN %d) queue full (SCB 0x%x)\n",
215 		       ahd_name(ahd), SCB_GET_TARGET(ahd,scb), SCB_GET_LUN(scb), SCB_GET_TAG(scb));
216         } else if ((scb->flags & SCB_SENSE) != 0) {
217                 /*
218                  * We performed autosense retrieval.
219                  *
220                  * Zero the sense data before having
221                  * the drive fill it.  The SCSI spec mandates
222                  * that any untransferred data should be
223                  * assumed to be zero.  Complete the 'bounce'
224                  * of sense information through buffers accessible
225                  * via bus-space by copying it into the client's
226                  * xs.
227                  */
228                 memset(&xs->sense.scsi_sense, 0, sizeof(xs->sense.scsi_sense));
229                 memcpy(&xs->sense.scsi_sense, ahd_get_sense_buf(ahd, scb),
230 		       sizeof(struct scsi_sense_data));
231 
232                 ahd_set_transaction_status(scb, XS_SENSE);
233         } else if ((scb->flags & SCB_PKT_SENSE) != 0) {
234 		struct scsi_status_iu_header *siu;
235 		u_int sense_len;
236 #ifdef AHD_DEBUG
237 		int i;
238 #endif
239 		/*
240 		 * Copy only the sense data into the provided buffer.
241 		 */
242 		siu = (struct scsi_status_iu_header *)scb->sense_data;
243 		sense_len = MIN(scsi_4btoul(siu->sense_length),
244 				sizeof(xs->sense.scsi_sense));
245 		memset(&xs->sense.scsi_sense, 0, sizeof(xs->sense.scsi_sense));
246 		memcpy(&xs->sense.scsi_sense,
247 		       scb->sense_data + SIU_SENSE_OFFSET(siu), sense_len);
248 #ifdef AHD_DEBUG
249 		printf("Copied %d bytes of sense data offset %d:", sense_len,
250 		       SIU_SENSE_OFFSET(siu));
251 		for (i = 0; i < sense_len; i++)
252 			printf(" 0x%x", ((uint8_t *)&xs->sense.scsi_sense)[i]);
253 		printf("\n");
254 #endif
255                 ahd_set_transaction_status(scb, XS_SENSE);
256 	}
257 
258 	if (scb->flags & SCB_FREEZE_QUEUE) {
259 	        scsipi_periph_thaw(periph, 1);
260                 scb->flags &= ~SCB_FREEZE_QUEUE;
261         }
262 
263         if (scb->flags & SCB_REQUEUE)
264                 ahd_set_transaction_status(scb, XS_REQUEUE);
265 
266         ahd_lock(ahd, &s);
267         ahd_free_scb(ahd, scb);
268         ahd_unlock(ahd, &s);
269 
270         scsipi_done(xs);
271 }
272 
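/*
 * scsipi adapter request entry point: start SCSI transfers
 * (ADAPTER_REQ_RUN_XFER), grow the SCB pool on demand
 * (ADAPTER_REQ_GROW_RESOURCES) and program transfer negotiation
 * goals (ADAPTER_REQ_SET_XFER_MODE).
 */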
273 static void
274 ahd_action(struct scsipi_channel *chan, scsipi_adapter_req_t req, void *arg)
275 {
276         struct ahd_softc *ahd;
277 	struct ahd_initiator_tinfo *tinfo;
278 	struct ahd_tmode_tstate *tstate;
279 
280 	ahd = (void *)chan->chan_adapter->adapt_dev;
281 
282 	switch(req) {
283 
284 	case ADAPTER_REQ_RUN_XFER:
285 	  {
286 		struct scsipi_xfer *xs;
287         	struct scsipi_periph *periph;
288 	        struct scb *scb;
289         	struct hardware_scb *hscb;
290 		u_int target_id;
291 		u_int our_id;
292 		u_int col_idx;
293 		char channel;
294 		int s;
295 
296 	  	xs = arg;
297                 periph = xs->xs_periph;
298 
299                 SC_DEBUG(periph, SCSIPI_DB3, ("ahd_action\n"));
300 
301 		target_id = periph->periph_target;
302                 our_id = ahd->our_id;
303                 channel = (chan->chan_channel == 1) ? 'B' : 'A';
304 
305                 /*
306 		 * get an scb to use.
307 		 */
308 		ahd_lock(ahd, &s);
309 		tinfo = ahd_fetch_transinfo(ahd, channel, our_id,
310 					    target_id, &tstate);
311 
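		/*
		 * Pick an SCB collision index.  Tagged or packetized (IU)
		 * requests may use any SCB; untagged requests are grouped
		 * by target/LUN, apparently so that they draw from the
		 * same SCB bucket and cannot conflict in the controller's
		 * untagged-command tracking.
		 */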
312 		if (xs->xs_tag_type != 0 ||
313 		    (tinfo->curr.ppr_options & MSG_EXT_PPR_IU_REQ) != 0)
314 			col_idx = AHD_NEVER_COL_IDX;
315 		else
316 			col_idx = AHD_BUILD_COL_IDX(target_id,
317 			    periph->periph_lun);
318 
319 		if ((scb = ahd_get_scb(ahd, col_idx)) == NULL) {
320 			xs->error = XS_RESOURCE_SHORTAGE;
321 			ahd_unlock(ahd, &s);
322 			scsipi_done(xs);
323 			return;
324 		}
325 		ahd_unlock(ahd, &s);
326 
327 		hscb = scb->hscb;
328 
329 		SC_DEBUG(periph, SCSIPI_DB3, ("start scb(%p)\n", scb));
330 		scb->xs = xs;
331 
332 		/*
333 		 * Put all the arguments for the xfer in the scb
334 		 */
335 		hscb->control = 0;
336 		hscb->scsiid = BUILD_SCSIID(ahd, sim, target_id, our_id);
337 		hscb->lun = periph->periph_lun;
338 		if (xs->xs_control & XS_CTL_RESET) {
339 			hscb->cdb_len = 0;
340 			scb->flags |= SCB_DEVICE_RESET;
341 			hscb->control |= MK_MESSAGE;
342 			hscb->task_management = SIU_TASKMGMT_LUN_RESET;
343 			ahd_execute_scb(scb, NULL, 0);
344 		} else {
345 			hscb->task_management = 0;
346 		}
347 
348 		ahd_setup_data(ahd, xs, scb);
349 		break;
350 	  }
351 
352 	case ADAPTER_REQ_GROW_RESOURCES:
353 #ifdef AHD_DEBUG
354 		printf("%s: ADAPTER_REQ_GROW_RESOURCES\n", ahd_name(ahd));
355 #endif
356 		chan->chan_adapter->adapt_openings += ahd_alloc_scbs(ahd);
357 		if (ahd->scb_data.numscbs >= AHD_SCB_MAX_ALLOC)
358 			chan->chan_flags &= ~SCSIPI_CHAN_CANGROW;
359 		break;
360 
361 	case ADAPTER_REQ_SET_XFER_MODE:
362 	    {
363 		struct scsipi_xfer_mode *xm = arg;
364 		struct ahd_devinfo devinfo;
365 		int target_id, our_id, first;
366 		u_int width;
367 		int s;
368 		char channel;
369 		u_int ppr_options = 0, period, offset;
370 		uint16_t old_autoneg;
371 
372 		target_id = xm->xm_target;
373 		our_id = chan->chan_id;
374 		channel = 'A';
375 		s = splbio();
376 		tinfo = ahd_fetch_transinfo(ahd, channel, our_id, target_id,
377 		    &tstate);
378 		ahd_compile_devinfo(&devinfo, our_id, target_id,
379 		    0, channel, ROLE_INITIATOR);
380 
381 		old_autoneg = tstate->auto_negotiate;
382 
383 		/*
384 		 * XXX since the period and offset are not provided here,
385 		 * fake things by forcing a renegotiation using the user
386 		 * settings if this is called for the first time (i.e.
387 		 * during probe). Also, cap various values at the user
388 		 * values, assuming that the user set it up that way.
389 		 */
390 		if (ahd->inited_target[target_id] == 0) {
391 			period = tinfo->user.period;
392 			offset = tinfo->user.offset;
393 			ppr_options = tinfo->user.ppr_options;
394 			width = tinfo->user.width;
395 			tstate->tagenable |=
396 			    (ahd->user_tagenable & devinfo.target_mask);
397 			tstate->discenable |=
398 			    (ahd->user_discenable & devinfo.target_mask);
399 			ahd->inited_target[target_id] = 1;
400 			first = 1;
401 		} else
402 			first = 0;
403 
404 		if (xm->xm_mode & (PERIPH_CAP_WIDE16 | PERIPH_CAP_DT))
405 			width = MSG_EXT_WDTR_BUS_16_BIT;
406 		else
407 			width = MSG_EXT_WDTR_BUS_8_BIT;
408 
409 		ahd_validate_width(ahd, NULL, &width, ROLE_UNKNOWN);
410 		if (width > tinfo->user.width)
411 			width = tinfo->user.width;
412 		ahd_set_width(ahd, &devinfo, width, AHD_TRANS_GOAL, FALSE);
413 
414 		if (!(xm->xm_mode & (PERIPH_CAP_SYNC | PERIPH_CAP_DT))) {
415 			period = 0;
416 			offset = 0;
417 			ppr_options = 0;
418 		}
419 
420 		if ((xm->xm_mode & PERIPH_CAP_DT) &&
421 		    (tinfo->user.ppr_options & MSG_EXT_PPR_DT_REQ))
422 			ppr_options |= MSG_EXT_PPR_DT_REQ;
423 		else
424 			ppr_options &= ~MSG_EXT_PPR_DT_REQ;
425 
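		/*
		 * Packetized (IU) transfers require disconnection and
		 * tagged queueing, so drop the IU request if either is
		 * disabled for this target.
		 */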
426 		if ((tstate->discenable & devinfo.target_mask) == 0 ||
427 		    (tstate->tagenable & devinfo.target_mask) == 0)
428 			ppr_options &= ~MSG_EXT_PPR_IU_REQ;
429 
430 		if ((xm->xm_mode & PERIPH_CAP_TQING) &&
431 		    (ahd->user_tagenable & devinfo.target_mask))
432 			tstate->tagenable |= devinfo.target_mask;
433 		else
434 			tstate->tagenable &= ~devinfo.target_mask;
435 
436 		ahd_find_syncrate(ahd, &period, &ppr_options, AHD_SYNCRATE_MAX);
437 		ahd_validate_offset(ahd, NULL, period, &offset,
438 		    MSG_EXT_WDTR_BUS_8_BIT, ROLE_UNKNOWN);
439 		if (offset == 0) {
440 			period = 0;
441 			ppr_options = 0;
442 		}
443 		if (ppr_options != 0
444 		    && tinfo->user.transport_version >= 3) {
445 			tinfo->goal.transport_version =
446 			    tinfo->user.transport_version;
447 			tinfo->curr.transport_version =
448 			    tinfo->user.transport_version;
449 		}
450 
451 		ahd_set_syncrate(ahd, &devinfo, period, offset,
452 		    ppr_options, AHD_TRANS_GOAL, FALSE);
453 
454 		/*
455 		 * If this is the first request, and no negotiation is
456 		 * needed, just confirm the state to the scsipi layer,
457 		 * so that it can print a message.
458 		 */
459 		if (old_autoneg == tstate->auto_negotiate && first) {
460 			xm->xm_mode = 0;
461 			xm->xm_period = tinfo->curr.period;
462 			xm->xm_offset = tinfo->curr.offset;
463 			if (tinfo->curr.width == MSG_EXT_WDTR_BUS_16_BIT)
464 				xm->xm_mode |= PERIPH_CAP_WIDE16;
465 			if (tinfo->curr.period)
466 				xm->xm_mode |= PERIPH_CAP_SYNC;
467 			if (tstate->tagenable & devinfo.target_mask)
468 				xm->xm_mode |= PERIPH_CAP_TQING;
469 			if (tinfo->curr.ppr_options & MSG_EXT_PPR_DT_REQ)
470 				xm->xm_mode |= PERIPH_CAP_DT;
471 			scsipi_async_event(chan, ASYNC_EVENT_XFER_MODE, xm);
472 		}
473 		splx(s);
474 	    }
475 	}
476 
477 	return;
478 }
479 
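/*
 * Called once the data DMA map has been loaded (or directly when
 * there is no data): fill in the hardware SCB's S/G list and control
 * bits, queue the SCB to the controller and, for polled transfers,
 * spin until the command completes.
 */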
480 static void
481 ahd_execute_scb(void *arg, bus_dma_segment_t *dm_segs, int nsegments)
482 {
483 	struct scb *scb;
484 	struct scsipi_xfer *xs;
485         struct ahd_softc *ahd;
486 	struct ahd_initiator_tinfo *tinfo;
487 	struct ahd_tmode_tstate *tstate;
488 	u_int  mask;
489         int    s;
490 
491 	scb = (struct scb*)arg;
492 	xs = scb->xs;
493 	xs->error = 0;
494 	xs->status = 0;
495 	xs->xs_status = 0;
496 	ahd = (void*)xs->xs_periph->periph_channel->chan_adapter->adapt_dev;
497 
498 	scb->sg_count = 0;
499 	if (nsegments != 0) {
500 		void *sg;
501 		int op;
502 		u_int i;
503 
504 		ahd_setup_data_scb(ahd, scb);
505 
506 		/* Copy the segments into our SG list */
507 		for (i = nsegments, sg = scb->sg_list; i > 0; i--) {
508 
509 			sg = ahd_sg_setup(ahd, scb, sg, dm_segs->ds_addr,
510 					  dm_segs->ds_len,
511 					  /*last*/i == 1);
512 			dm_segs++;
513 		}
514 
515 		if (xs->xs_control & XS_CTL_DATA_IN)
516 			op = BUS_DMASYNC_PREREAD;
517 		else
518 			op = BUS_DMASYNC_PREWRITE;
519 
520 		bus_dmamap_sync(ahd->parent_dmat, scb->dmamap, 0,
521 				scb->dmamap->dm_mapsize, op);
522 	}
523 
524 	ahd_lock(ahd, &s);
525 
526 	/*
527 	 * This is the last chance to check whether this SCB
528 	 * needs to be aborted.
529 	 */
530 	if (ahd_get_scsi_status(scb) == XS_STS_DONE) {
531 		if (nsegments != 0)
532 			bus_dmamap_unload(ahd->parent_dmat,
533 					  scb->dmamap);
534 		ahd_free_scb(ahd, scb);
535 		ahd_unlock(ahd, &s);
536 		return;
537 	}
538 
539 	tinfo = ahd_fetch_transinfo(ahd, SCSIID_CHANNEL(ahd, scb->hscb->scsiid),
540 				    SCSIID_OUR_ID(scb->hscb->scsiid),
541 				    SCSIID_TARGET(ahd, scb->hscb->scsiid),
542 				    &tstate);
543 
544 	mask = SCB_GET_TARGET_MASK(ahd, scb);
545 
546 	if ((tstate->discenable & mask) != 0)
547 		scb->hscb->control |= DISCENB;
548 
549 	if ((tstate->tagenable & mask) != 0)
550 		scb->hscb->control |= xs->xs_tag_type|TAG_ENB;
551 
552 	if ((tinfo->curr.ppr_options & MSG_EXT_PPR_IU) != 0) {
553 		scb->flags |= SCB_PACKETIZED;
554 		if (scb->hscb->task_management != 0)
555 			scb->hscb->control &= ~MK_MESSAGE;
556 	}
557 
558 #if 0	/* This looks like it makes sense at first, but it can loop */
559 	if ((xs->xs_control & XS_CTL_DISCOVERY) &&
560 	    (tinfo->goal.width != 0
561 	     || tinfo->goal.period != 0
562 	     || tinfo->goal.ppr_options != 0)) {
563 		scb->flags |= SCB_NEGOTIATE;
564 		scb->hscb->control |= MK_MESSAGE;
565 	} else
566 #endif
567 	if ((tstate->auto_negotiate & mask) != 0) {
568 	  	scb->flags |= SCB_AUTO_NEGOTIATE;
569 		scb->hscb->control |= MK_MESSAGE;
570 	}
571 
572 	LIST_INSERT_HEAD(&ahd->pending_scbs, scb, pending_links);
573 
574 	scb->flags |= SCB_ACTIVE;
575 
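	/*
	 * Arm the per-command watchdog unless the midlayer will poll;
	 * the timeout is converted from milliseconds to ticks as in
	 * ahd_done().
	 */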
576 	if (!(xs->xs_control & XS_CTL_POLL)) {
577 		callout_reset(&scb->xs->xs_callout, xs->timeout > 1000000 ?
578 			      (xs->timeout / 1000) * hz : (xs->timeout * hz) / 1000,
579 			      ahd_timeout, scb);
580 	}
581 
582 	if ((scb->flags & SCB_TARGET_IMMEDIATE) != 0) {
583 		/* Define a mapping from our tag to the SCB. */
584 		ahd->scb_data.scbindex[SCB_GET_TAG(scb)] = scb;
585 		ahd_pause(ahd);
586 		ahd_set_scbptr(ahd, SCB_GET_TAG(scb));
587 		ahd_outb(ahd, RETURN_1, CONT_MSG_LOOP_TARG);
588 		ahd_unpause(ahd);
589 	} else {
590 		ahd_queue_scb(ahd, scb);
591 	}
592 
593 	if (!(xs->xs_control & XS_CTL_POLL)) {
594                 ahd_unlock(ahd, &s);
595                 return;
596         }
597         /*
598          * If we can't use interrupts, poll for completion
599          */
600         SC_DEBUG(xs->xs_periph, SCSIPI_DB3, ("cmd_poll\n"));
601         do {
602                 if (ahd_poll(ahd, xs->timeout)) {
603                         if (!(xs->xs_control & XS_CTL_SILENT))
604                                 printf("cmd fail\n");
605                         ahd_timeout(scb);
606                         break;
607                 }
608         } while (!(xs->xs_status & XS_STS_DONE));
609 
610 	ahd_unlock(ahd, &s);
611 }
612 
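/*
 * Busy-wait up to 'wait' milliseconds (one DELAY(1000) per loop) for
 * the controller to post an interrupt, then run the interrupt handler.
 */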
613 static int
614 ahd_poll(struct ahd_softc *ahd, int wait)
615 {
616 
617 	while (--wait) {
618                 DELAY(1000);
619                 if (ahd_inb(ahd, INTSTAT) & INT_PEND)
620                         break;
621         }
622 
623         if (wait == 0) {
624                 printf("%s: board is not responding\n", ahd_name(ahd));
625                 return (EIO);
626         }
627 
628         ahd_intr((void *)ahd);
629         return (0);
630 }
631 
632 
633 static void
634 ahd_setup_data(struct ahd_softc *ahd, struct scsipi_xfer *xs,
635 	       struct scb *scb)
636 {
637 	struct hardware_scb *hscb;
638 
639 	hscb = scb->hscb;
640 	xs->resid = xs->status = 0;
641 
642 	hscb->cdb_len = xs->cmdlen;
643 	if (hscb->cdb_len > MAX_CDB_LEN) {
644 		int s;
645 		/*
646 		 * Should CAM start to support CDB sizes
647 		 * greater than 16 bytes, we could use
648 		 * the sense buffer to store the CDB.
649 		 */
650 		ahd_set_transaction_status(scb,
651 					   XS_DRIVER_STUFFUP);
652 
653 		ahd_lock(ahd, &s);
654 		ahd_free_scb(ahd, scb);
655 		ahd_unlock(ahd, &s);
656 		scsipi_done(xs);
		return;
657 	}
658 	memcpy(hscb->shared_data.idata.cdb, xs->cmd, hscb->cdb_len);
659 
660 	/* Only use S/G if there is a transfer */
661         if (xs->datalen) {
662                 int error;
663 
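                /*
                 * Map the data buffer for DMA.  Only sleep for resources
                 * when the caller may sleep (no XS_CTL_NOSLEEP), and pass
                 * the transfer direction as a hint to the bus_dma back-end.
                 */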
664                 error = bus_dmamap_load(ahd->parent_dmat,
665 					scb->dmamap, xs->data,
666 					xs->datalen, NULL,
667 					((xs->xs_control & XS_CTL_NOSLEEP) ?
668 					 BUS_DMA_NOWAIT : BUS_DMA_WAITOK) |
669 					BUS_DMA_STREAMING |
670 					((xs->xs_control & XS_CTL_DATA_IN) ?
671 					 BUS_DMA_READ : BUS_DMA_WRITE));
672                 if (error) {
673 #ifdef AHD_DEBUG
674                         printf("%s: in ahd_setup_data(): bus_dmamap_load() "
675 			       "= %d\n",
676 			       ahd_name(ahd), error);
677 #endif
678                         xs->error = XS_RESOURCE_SHORTAGE;
679                         scsipi_done(xs);
680                         return;
681                 }
682                 ahd_execute_scb(scb,
683 				scb->dmamap->dm_segs,
684 				scb->dmamap->dm_nsegs);
685         } else {
686                 ahd_execute_scb(scb, NULL, 0);
687         }
688 }
689 
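/*
 * Watchdog for a queued command that failed to complete in time:
 * pause the chip, dump its state for diagnosis and recover by
 * resetting the channel, which completes or requeues all pending
 * commands.
 */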
690 void
691 ahd_timeout(void *arg)
692 {
693 	struct	scb	  *scb;
694 	struct	ahd_softc *ahd;
695 	ahd_mode_state	   saved_modes;
696 	int		   s;
697 
698 	scb = (struct scb *)arg;
699 	ahd = (struct ahd_softc *)scb->ahd_softc;
700 
701 	printf("%s: ahd_timeout\n", ahd_name(ahd));
702 
703 	ahd_lock(ahd, &s);
704 
705 	ahd_pause_and_flushwork(ahd);
706 	saved_modes = ahd_save_modes(ahd);
707 #if 0
708 	ahd_set_modes(ahd, AHD_MODE_SCSI, AHD_MODE_SCSI);
709 	ahd_outb(ahd, SCSISIGO, ACKO);
710 	printf("set ACK\n");
711 	ahd_outb(ahd, SCSISIGO, 0);
712 	printf("clearing Ack\n");
713 	ahd_restore_modes(ahd, saved_modes);
714 #endif
715 	if ((scb->flags & SCB_ACTIVE) == 0) {
716 		/* Previous timeout took care of me already */
717 		printf("%s: Timedout SCB already complete. "
718 		       "Interrupts may not be functioning.\n", ahd_name(ahd));
719 		ahd_unpause(ahd);
720 		ahd_unlock(ahd, &s);
721 		return;
722 	}
723 
724 	ahd_print_path(ahd, scb);
725 	printf("SCB 0x%x - timed out\n", SCB_GET_TAG(scb));
726 	ahd_dump_card_state(ahd);
727 	ahd_reset_channel(ahd, SIM_CHANNEL(ahd, sim),
728 			  /*initiate reset*/TRUE);
729 	ahd_unlock(ahd, &s);
730 	return;
731 }
732 
733 int
734 ahd_platform_alloc(struct ahd_softc *ahd, void *platform_arg)
735 {
736 	ahd->platform_data = malloc(sizeof(struct ahd_platform_data), M_DEVBUF,
737 				    M_NOWAIT /*| M_ZERO*/);
738 	if (ahd->platform_data == NULL)
739 		return (ENOMEM);
740 
741 	memset(ahd->platform_data, 0, sizeof(struct ahd_platform_data));
742 
743 	return (0);
744 }
745 
746 void
747 ahd_platform_free(struct ahd_softc *ahd)
748 {
749 	free(ahd->platform_data, M_DEVBUF);
750 }
751 
752 int
753 ahd_softc_comp(struct ahd_softc *lahd, struct ahd_softc *rahd)
754 {
755 	/* We don't sort softcs under NetBSD so report equal always */
756 	return (0);
757 }
758 
759 int
760 ahd_detach(struct device *self, int flags)
761 {
762 	int rv = 0;
763 
764 	struct ahd_softc *ahd = (struct ahd_softc*)self;
765 
766 	if (ahd->sc_child != NULL)
767 		rv = config_detach((void *)ahd->sc_child, flags);
768 
769 	shutdownhook_disestablish(ahd->shutdown_hook);
770 
771 	ahd_free(ahd);
772 
773 	return rv;
774 }
775 
776 void
777 ahd_platform_set_tags(struct ahd_softc *ahd,
778 		      struct ahd_devinfo *devinfo, ahd_queue_alg alg)
779 {
780         struct ahd_tmode_tstate *tstate;
781 
782         ahd_fetch_transinfo(ahd, devinfo->channel, devinfo->our_scsiid,
783                             devinfo->target, &tstate);
784 
785         if (alg != AHD_QUEUE_NONE)
786                 tstate->tagenable |= devinfo->target_mask;
787 	else
788 	  	tstate->tagenable &= ~devinfo->target_mask;
789 }
790 
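/*
 * Forward controller events to the scsipi layer: completed transfer
 * negotiations are reported as ASYNC_EVENT_XFER_MODE and bus resets
 * as ASYNC_EVENT_RESET.
 */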
791 void
792 ahd_send_async(struct ahd_softc *ahc, char channel, u_int target, u_int lun,
793 	       ac_code code, void *opt_arg)
794 {
795 	struct ahd_tmode_tstate *tstate;
796 	struct ahd_initiator_tinfo *tinfo;
797 	struct ahd_devinfo devinfo;
798 	struct scsipi_channel *chan;
799 	struct scsipi_xfer_mode xm;
800 
801 #ifdef DIAGNOSTIC
802 	if (channel != 'A')
803 		panic("ahd_send_async: not channel A");
804 #endif
805 	chan = &ahc->sc_channel;
806 	switch (code) {
807 	case AC_TRANSFER_NEG:
808 		tinfo = ahd_fetch_transinfo(ahc, channel, ahc->our_id, target,
809 			    &tstate);
810 		ahd_compile_devinfo(&devinfo, ahc->our_id, target, lun,
811 		    channel, ROLE_UNKNOWN);
812 		/*
813 		 * Don't bother if negotiating. XXX?
814 		 */
815 		if (tinfo->curr.period != tinfo->goal.period
816 		    || tinfo->curr.width != tinfo->goal.width
817 		    || tinfo->curr.offset != tinfo->goal.offset
818 		    || tinfo->curr.ppr_options != tinfo->goal.ppr_options)
819 			break;
820 		xm.xm_target = target;
821 		xm.xm_mode = 0;
822 		xm.xm_period = tinfo->curr.period;
823 		xm.xm_offset = tinfo->curr.offset;
824 		if (tinfo->goal.ppr_options & MSG_EXT_PPR_DT_REQ)
825 			xm.xm_mode |= PERIPH_CAP_DT;
826 		if (tinfo->curr.width == MSG_EXT_WDTR_BUS_16_BIT)
827 			xm.xm_mode |= PERIPH_CAP_WIDE16;
828 		if (tinfo->curr.period)
829 			xm.xm_mode |= PERIPH_CAP_SYNC;
830 		if (tstate->tagenable & devinfo.target_mask)
831 			xm.xm_mode |= PERIPH_CAP_TQING;
832 		scsipi_async_event(chan, ASYNC_EVENT_XFER_MODE, &xm);
833 		break;
834 	case AC_BUS_RESET:
835 		scsipi_async_event(chan, ASYNC_EVENT_RESET, NULL);
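		/* FALLTHROUGH */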
836 	case AC_SENT_BDR:
837 	default:
838 		break;
839 	}
840 }
841