/* $NetBSD: isp_netbsd.c,v 1.27 2000/07/07 03:14:53 mjacob Exp $ */
/*
 * Platform (NetBSD) dependent common attachment code for Qlogic adapters.
 * Matthew Jacob <mjacob@nas.nasa.gov>
 */
/*
 * Copyright (C) 1997, 1998, 1999 National Aeronautics & Space Administration
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <dev/ic/isp_netbsd.h>
#include <sys/scsiio.h>


/*
 * Set a timeout for the watchdogging of a command.
 *
 * The dimensional analysis is
 *
 *	milliseconds * (seconds/millisecond) * (ticks/second) = ticks
 *
 *			=
 *
 *	(milliseconds / 1000) * hz = ticks
 *
 *
 * For timeouts less than 1 second, we'll get zero. Because of this, and
 * because we want to establish *our* timeout to be longer than what the
 * firmware might do, we just add 3 seconds at the back end.
 */
#define	_XT(xs)	((((xs)->timeout/1000) * hz) + (3 * hz))
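/*
 * E.g. with hz = 100 (a typical NetBSD value) and a 10000 millisecond
 * midlayer timeout, _XT(xs) = (10000 / 1000) * 100 + 3 * 100 = 1300 ticks,
 * i.e. our watchdog fires about 13 seconds after the command is started.
 */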

static void ispminphys __P((struct buf *));
static int32_t ispcmd_slow __P((ISP_SCSI_XFER_T *));
static int32_t ispcmd __P((ISP_SCSI_XFER_T *));
static int
ispioctl __P((struct scsipi_link *, u_long, caddr_t, int, struct proc *));

static struct scsipi_device isp_dev = { NULL, NULL, NULL, NULL };
static int isp_polled_cmd __P((struct ispsoftc *, ISP_SCSI_XFER_T *));
static void isp_dog __P((void *));
static void isp_command_requeue __P((void *));
static void isp_internal_restart __P((void *));

/*
 * Complete attachment of hardware, including subdevices.
 */
void
isp_attach(isp)
	struct ispsoftc *isp;
{
	int maxluns = isp->isp_maxluns - 1;

	isp->isp_osinfo._adapter.scsipi_minphys = ispminphys;
	isp->isp_osinfo._adapter.scsipi_ioctl = ispioctl;

	isp->isp_state = ISP_RUNSTATE;
	isp->isp_osinfo._link.scsipi_scsi.channel =
	    (IS_DUALBUS(isp))? 0 : SCSI_CHANNEL_ONLY_ONE;
	isp->isp_osinfo._link.adapter_softc = isp;
	isp->isp_osinfo._link.device = &isp_dev;
	isp->isp_osinfo._link.adapter = &isp->isp_osinfo._adapter;
	isp->isp_osinfo._link.openings = isp->isp_maxcmds;
	isp->isp_osinfo._link.scsipi_scsi.max_lun = maxluns;
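	/*
	 * The wait queue holds commands that arrive while the adapter is
	 * blocked (e.g. while the Fibre Channel loop is down); they are
	 * re-issued from isp_internal_restart once we unblock.
	 */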
	TAILQ_INIT(&isp->isp_osinfo.waitq);	/* XXX 2nd Bus? */

	if (IS_FC(isp)) {
		/*
		 * Give it another chance here to come alive...
		 */
		isp->isp_osinfo._adapter.scsipi_cmd = ispcmd;
		isp->isp_osinfo._link.scsipi_scsi.max_target = MAX_FC_TARG-1;
		/*
		 * But we have to be reasonable until the midlayer is fixed.
		 */
		if (maxluns > 255)
			isp->isp_osinfo._link.scsipi_scsi.max_lun = 255;
	} else {
		sdparam *sdp = isp->isp_param;
		isp->isp_osinfo._adapter.scsipi_cmd = ispcmd_slow;
		isp->isp_osinfo._link.scsipi_scsi.max_target = MAX_TARGETS-1;
		isp->isp_osinfo._link.scsipi_scsi.adapter_target =
		    sdp->isp_initiator_id;
		isp->isp_osinfo.discovered[0] = 1 << sdp->isp_initiator_id;
		/*
		 * But we have to be reasonable until the midlayer is fixed.
		 */
		if (maxluns > 7)
			isp->isp_osinfo._link.scsipi_scsi.max_lun = 7;
		if (IS_DUALBUS(isp)) {
			isp->isp_osinfo._link_b = isp->isp_osinfo._link;
			sdp++;
			isp->isp_osinfo.discovered[1] =
			    1 << sdp->isp_initiator_id;
			isp->isp_osinfo._link_b.scsipi_scsi.adapter_target =
			    sdp->isp_initiator_id;
			isp->isp_osinfo._link_b.scsipi_scsi.channel = 1;
			isp->isp_osinfo._link_b.scsipi_scsi.max_lun =
			    isp->isp_osinfo._link.scsipi_scsi.max_lun;
		}
	}
	isp->isp_osinfo._link.type = BUS_SCSI;

	/*
	 * Send a SCSI Bus Reset.
	 */
	if (IS_SCSI(isp)) {
		int bus = 0;
		(void) isp_control(isp, ISPCTL_RESET_BUS, &bus);
		if (IS_DUALBUS(isp)) {
			bus++;
			(void) isp_control(isp, ISPCTL_RESET_BUS, &bus);
		}
	} else {
		int i, j;
		fcparam *fcp = isp->isp_param;
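		/*
		 * Let the firmware settle, then poll for a usable link:
		 * retry the link test (pausing a bit longer if we might
		 * be joining a fabric) until the firmware reports ready
		 * and the port database has been received.
		 */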
		delay(2 * 1000000);
		for (j = 0; j < 5; j++) {
			for (i = 0; i < 5; i++) {
				if (isp_control(isp, ISPCTL_FCLINK_TEST, NULL))
					continue;
#ifdef	ISP2100_FABRIC
				/*
				 * Wait extra time to see if the f/w
				 * eventually completed an FLOGI that
				 * will allow us to know we're on a
				 * fabric.
				 */
				if (fcp->isp_onfabric == 0) {
					delay(1 * 1000000);
					continue;
				}
#endif
				break;
			}
			if (fcp->isp_fwstate == FW_READY &&
			    fcp->isp_loopstate >= LOOP_PDB_RCVD) {
				break;
			}
		}
		isp->isp_osinfo._link.scsipi_scsi.adapter_target =
			fcp->isp_loopid;
	}

	/*
	 * And attach children (if any).
	 */
	config_found((void *)isp, &isp->isp_osinfo._link, scsiprint);
	if (IS_DUALBUS(isp)) {
		config_found((void *)isp, &isp->isp_osinfo._link_b, scsiprint);
	}
}

/*
 * minphys our xfers
 *
 * Unfortunately, the buffer pointer describes the target device- not the
 * adapter device, so we can't use the pointer to find out what kind of
 * adapter we are and adjust accordingly.
 */

static void
ispminphys(bp)
	struct buf *bp;
{
	/*
	 * XX: Only the 1020 has a 24 bit limit.
	 */
	if (bp->b_bcount >= (1 << 24)) {
		bp->b_bcount = (1 << 24);
	}
	minphys(bp);
}

static int32_t
ispcmd_slow(xs)
	ISP_SCSI_XFER_T *xs;
{
	sdparam *sdp;
	int tgt, chan, s;
	u_int16_t flags;
	struct ispsoftc *isp = XS_ISP(xs);

	/*
	 * Have we completed discovery for this target on this adapter?
	 */
	tgt = XS_TGT(xs);
	chan = XS_CHANNEL(xs);
	if ((xs->xs_control & XS_CTL_DISCOVERY) != 0 ||
	    (isp->isp_osinfo.discovered[chan] & (1 << tgt)) != 0) {
		return (ispcmd(xs));
	}

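	/*
	 * Start from the default parameter set (assumed to have sync, wide
	 * and tagged queueing enabled) and toggle off whatever the midlayer
	 * has quirked out for this device; the XORs below clear the
	 * corresponding capability bits.
	 */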
	flags = DPARM_DEFAULT;
	if (xs->sc_link->quirks & SDEV_NOSYNC) {
		flags ^= DPARM_SYNC;
#ifdef	DEBUG
	} else {
		printf("%s: channel %d target %d can do SYNC xfers\n",
		    isp->isp_name, chan, tgt);
#endif
	}
	if (xs->sc_link->quirks & SDEV_NOWIDE) {
		flags ^= DPARM_WIDE;
#ifdef	DEBUG
	} else {
		printf("%s: channel %d target %d can do WIDE xfers\n",
		    isp->isp_name, chan, tgt);
#endif
	}
	if (xs->sc_link->quirks & SDEV_NOTAG) {
		flags ^= DPARM_TQING;
#ifdef	DEBUG
	} else {
		printf("%s: channel %d target %d can do TAGGED xfers\n",
		    isp->isp_name, chan, tgt);
#endif
	}
	/*
	 * Okay, we know about this device now,
	 * so mark parameters to be updated for it.
	 */
	s = splbio();
	isp->isp_osinfo.discovered[chan] |= (1 << tgt);
	sdp = isp->isp_param;
	sdp += chan;
	sdp->isp_devparam[tgt].dev_flags = flags;
	sdp->isp_devparam[tgt].dev_update = 1;
	isp->isp_update |= (1 << chan);
	splx(s);
	return (ispcmd(xs));
}

static int
ispioctl(sc_link, cmd, addr, flag, p)
	struct scsipi_link *sc_link;
	u_long cmd;
	caddr_t addr;
	int flag;
	struct proc *p;
{
	struct ispsoftc *isp = sc_link->adapter_softc;
	int s, chan, retval = ENOTTY;

	switch (cmd) {
	case SCBUSIORESET:
		chan = sc_link->scsipi_scsi.channel;
		s = splbio();
		if (isp_control(isp, ISPCTL_RESET_BUS, &chan))
			retval = EIO;
		else
			retval = 0;
		(void) splx(s);
		break;
	default:
		break;
	}
	return (retval);
}


static int32_t
ispcmd(xs)
	ISP_SCSI_XFER_T *xs;
{
	struct ispsoftc *isp;
	int result, s;

	isp = XS_ISP(xs);
	s = splbio();
	if (isp->isp_state < ISP_RUNSTATE) {
		DISABLE_INTS(isp);
		isp_init(isp);
		if (isp->isp_state != ISP_INITSTATE) {
			ENABLE_INTS(isp);
			(void) splx(s);
			XS_SETERR(xs, HBA_BOTCH);
			return (COMPLETE);
		}
		isp->isp_state = ISP_RUNSTATE;
		ENABLE_INTS(isp);
	}

	/*
	 * Check for queue blockage...
	 */
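	/*
	 * If we're blocked (e.g., the FC loop is down), a polled command
	 * has to fail right away; anything else is parked on the wait
	 * queue to be re-issued by isp_internal_restart when we unblock.
	 */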
	if (isp->isp_osinfo.blocked) {
		if (xs->xs_control & XS_CTL_POLL) {
			xs->error = XS_DRIVER_STUFFUP;
			splx(s);
			return (TRY_AGAIN_LATER);
		}
		TAILQ_INSERT_TAIL(&isp->isp_osinfo.waitq, xs, adapter_q);
		splx(s);
		return (SUCCESSFULLY_QUEUED);
	}

	if (xs->xs_control & XS_CTL_POLL) {
		result = isp_polled_cmd(isp, xs);
		(void) splx(s);
		return (result);
	}

	result = ispscsicmd(xs);
	switch (result) {
	case CMD_QUEUED:
		result = SUCCESSFULLY_QUEUED;
		if (xs->timeout) {
			callout_reset(&xs->xs_callout, _XT(xs), isp_dog, xs);
		}
		break;
	case CMD_EAGAIN:
		result = TRY_AGAIN_LATER;
		break;
	case CMD_RQLATER:
		result = SUCCESSFULLY_QUEUED;
		callout_reset(&xs->xs_callout, hz, isp_command_requeue, xs);
		break;
	case CMD_COMPLETE:
		result = COMPLETE;
		break;
	}
	(void) splx(s);
	return (result);
}

static int
isp_polled_cmd(isp, xs)
	struct ispsoftc *isp;
	ISP_SCSI_XFER_T *xs;
{
	int result;
	int infinite = 0, mswait;

	result = ispscsicmd(xs);

	switch (result) {
	case CMD_QUEUED:
		result = SUCCESSFULLY_QUEUED;
		break;
	case CMD_RQLATER:
	case CMD_EAGAIN:
		if (XS_NOERR(xs)) {
			xs->error = XS_DRIVER_STUFFUP;
		}
		result = TRY_AGAIN_LATER;
		break;
	case CMD_COMPLETE:
		result = COMPLETE;
		break;

	}

	if (result != SUCCESSFULLY_QUEUED) {
		return (result);
	}

	/*
	 * If we can't use interrupts, poll on completion.
	 */
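	/* A timeout of zero means poll forever. */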
	if ((mswait = XS_TIME(xs)) == 0)
		infinite = 1;

	while (mswait || infinite) {
		if (isp_intr((void *)isp)) {
			if (XS_CMD_DONE_P(xs)) {
				break;
			}
		}
		SYS_DELAY(1000);
		mswait -= 1;
	}

	/*
	 * If no other error occurred but we didn't finish,
	 * something bad happened.
	 */
	if (XS_CMD_DONE_P(xs) == 0) {
		if (isp_control(isp, ISPCTL_ABORT_CMD, xs)) {
			isp_restart(isp);
		}
		if (XS_NOERR(xs)) {
			XS_SETERR(xs, HBA_BOTCH);
		}
	}
	result = COMPLETE;
	return (result);
}

void
isp_done(xs)
	ISP_SCSI_XFER_T *xs;
{
	XS_CMD_S_DONE(xs);
	if (XS_CMD_WDOG_P(xs) == 0) {
		struct ispsoftc *isp = XS_ISP(xs);
		callout_stop(&xs->xs_callout);
		if (XS_CMD_GRACE_P(xs)) {
			PRINTF("%s: finished command on borrowed time\n",
			    isp->isp_name);
		}
		XS_CMD_S_CLEAR(xs);
		scsipi_done(xs);
	}
}

static void
isp_dog(arg)
	void *arg;
{
	ISP_SCSI_XFER_T *xs = arg;
	struct ispsoftc *isp = XS_ISP(xs);
	u_int32_t handle;
	int s = splbio();

	/*
	 * We've decided this command is dead. Make sure we're not trying
	 * to kill a command that's already dead by getting its handle
	 * and seeing whether it's still alive.
	 */
	handle = isp_find_handle(isp, xs);
	if (handle) {
		u_int16_t r, r1, i;

		if (XS_CMD_DONE_P(xs)) {
			PRINTF("%s: watchdog found done cmd (handle 0x%x)\n",
			    isp->isp_name, handle);
			(void) splx(s);
			return;
		}

		if (XS_CMD_WDOG_P(xs)) {
			PRINTF("%s: recursive watchdog (handle 0x%x)\n",
			    isp->isp_name, handle);
			(void) splx(s);
			return;
		}

		XS_CMD_S_WDOG(xs);

		i = 0;
		do {
			r = ISP_READ(isp, BIU_ISR);
			SYS_DELAY(1);
			r1 = ISP_READ(isp, BIU_ISR);
		} while (r != r1 && ++i < 1000);

		if (INT_PENDING(isp, r) && isp_intr(isp) && XS_CMD_DONE_P(xs)) {
			IDPRINTF(1, ("%s: watchdog cleanup (%x, %x)\n",
			    isp->isp_name, handle, r));
			XS_CMD_C_WDOG(xs);
			isp_done(xs);
		} else if (XS_CMD_GRACE_P(xs)) {
			IDPRINTF(1, ("%s: watchdog timeout (%x, %x)\n",
			    isp->isp_name, handle, r));
			/*
			 * Make sure the command is *really* dead before we
			 * release the handle (and DMA resources) for reuse.
			 */
			(void) isp_control(isp, ISPCTL_ABORT_CMD, arg);

			/*
			 * After this point, the command is really dead.
			 */
			if (XS_XFRLEN(xs)) {
				ISP_DMAFREE(isp, xs, handle);
			}
			isp_destroy_handle(isp, handle);
			XS_SETERR(xs, XS_TIMEOUT);
			XS_CMD_S_CLEAR(xs);
			isp_done(xs);
		} else {
			u_int16_t iptr, optr;
			ispreq_t *mp;

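			/*
			 * First expiry: grant the command a one second
			 * grace period. Clear the watchdog flag, re-arm
			 * the watchdog, and push a SYNC_ALL marker request
			 * through the request queue for this channel. If
			 * the command still isn't done when the watchdog
			 * fires again (the GRACE case above), it gets
			 * aborted and completed with XS_TIMEOUT.
			 */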
			IDPRINTF(2, ("%s: possible command timeout (%x, %x)\n",
			    isp->isp_name, handle, r));

			XS_CMD_C_WDOG(xs);
			callout_reset(&xs->xs_callout, hz, isp_dog, xs);
			if (isp_getrqentry(isp, &iptr, &optr, (void **) &mp)) {
				(void) splx(s);
				return;
			}
			XS_CMD_S_GRACE(xs);
			MEMZERO((void *) mp, sizeof (*mp));
			mp->req_header.rqs_entry_count = 1;
			mp->req_header.rqs_entry_type = RQSTYPE_MARKER;
			mp->req_modifier = SYNC_ALL;
			mp->req_target = XS_CHANNEL(xs) << 7;
			ISP_SWIZZLE_REQUEST(isp, mp);
			MemoryBarrier();
			ISP_ADD_REQUEST(isp, iptr);
		}
	} else if (isp->isp_dblev) {
		PRINTF("%s: watchdog with no command\n", isp->isp_name);
	}
	(void) splx(s);
}

/*
 * Free any associated resources prior to decommissioning and
 * set the card to a known state (so it doesn't wake up and kick
 * us when we aren't expecting it to).
 *
 * Locks are held before coming here.
 */
void
isp_uninit(isp)
	struct ispsoftc *isp;
{
	ISP_ILOCKVAL_DECL;
	ISP_ILOCK(isp);
	/*
	 * Leave with interrupts disabled.
	 */
	DISABLE_INTS(isp);

	ISP_IUNLOCK(isp);
}

/*
 * Restart function for a command to be requeued later.
 */
static void
isp_command_requeue(arg)
	void *arg;
{
	struct scsipi_xfer *xs = arg;
	struct ispsoftc *isp = XS_ISP(xs);
	int s = splbio();
	switch (ispcmd_slow(xs)) {
	case SUCCESSFULLY_QUEUED:
		printf("%s: isp_command_requeue: requeued for %d.%d\n",
		    isp->isp_name, XS_TGT(xs), XS_LUN(xs));
		if (xs->timeout) {
			callout_reset(&xs->xs_callout, _XT(xs), isp_dog, xs);
		}
		break;
	case TRY_AGAIN_LATER:
		printf("%s: EAGAIN for %d.%d\n",
		    isp->isp_name, XS_TGT(xs), XS_LUN(xs));
		/* FALLTHROUGH */
	case COMPLETE:
		/* can only be an error */
		XS_CMD_S_DONE(xs);
		callout_stop(&xs->xs_callout);
		if (XS_NOERR(xs)) {
			XS_SETERR(xs, HBA_BOTCH);
		}
		scsipi_done(xs);
		break;
	}
	(void) splx(s);
}

/*
 * Restart function after a LOOP UP event (e.g.),
 * done as a timeout for some hysteresis.
 */
static void
isp_internal_restart(arg)
	void *arg;
{
	struct ispsoftc *isp = arg;
	int result, nrestarted = 0, s;

	s = splbio();
	if (isp->isp_osinfo.blocked == 0) {
		struct scsipi_xfer *xs;
		while ((xs = TAILQ_FIRST(&isp->isp_osinfo.waitq)) != NULL) {
			TAILQ_REMOVE(&isp->isp_osinfo.waitq, xs, adapter_q);
			result = ispscsicmd(xs);
			if (result != CMD_QUEUED) {
				printf("%s: botched command restart (0x%x)\n",
				    isp->isp_name, result);
				XS_CMD_S_DONE(xs);
				if (xs->error == XS_NOERROR)
					xs->error = XS_DRIVER_STUFFUP;
				callout_stop(&xs->xs_callout);
				scsipi_done(xs);
			} else if (xs->timeout) {
				callout_reset(&xs->xs_callout,
				    _XT(xs), isp_dog, xs);
			}
			nrestarted++;
		}
		printf("%s: requeued %d commands\n", isp->isp_name, nrestarted);
	}
	(void) splx(s);
}

int
isp_async(isp, cmd, arg)
	struct ispsoftc *isp;
	ispasync_t cmd;
	void *arg;
{
	int bus, tgt;
	int s = splbio();
	switch (cmd) {
	case ISPASYNC_NEW_TGT_PARAMS:
	if (IS_SCSI(isp) && isp->isp_dblev) {
		sdparam *sdp = isp->isp_param;
		char *wt;
		int mhz, flags, period;

		tgt = *((int *) arg);
		bus = (tgt >> 16) & 0xffff;
		tgt &= 0xffff;
		sdp += bus;
		flags = sdp->isp_devparam[tgt].cur_dflags;
		period = sdp->isp_devparam[tgt].cur_period;

		if ((flags & DPARM_SYNC) && period &&
		    (sdp->isp_devparam[tgt].cur_offset) != 0) {
#if	0
			/* CAUSES PANICS */
			static char *m = "%s: bus %d now %s mode\n";
			u_int16_t r, l;
			if (bus == 1)
				r = SXP_PINS_DIFF | SXP_BANK1_SELECT;
			else
				r = SXP_PINS_DIFF;
			l = ISP_READ(isp, r) & ISP1080_MODE_MASK;
			switch (l) {
			case ISP1080_LVD_MODE:
				sdp->isp_lvdmode = 1;
				printf(m, isp->isp_name, bus, "LVD");
				break;
			case ISP1080_HVD_MODE:
				sdp->isp_diffmode = 1;
				printf(m, isp->isp_name, bus, "Differential");
				break;
			case ISP1080_SE_MODE:
				sdp->isp_ultramode = 1;
				printf(m, isp->isp_name, bus, "Single-Ended");
				break;
			default:
				printf("%s: unknown mode on bus %d (0x%x)\n",
				    isp->isp_name, bus, l);
				break;
			}
#endif
			/*
			 * There's some ambiguity about our negotiated speed
			 * if we haven't detected LVD mode correctly (which
			 * seems to happen, unfortunately). If we're in LVD
			 * mode, then different rules apply about speed.
			 */
			if (sdp->isp_lvdmode || period < 0xc) {
				switch (period) {
				case 0x9:
					mhz = 80;
					break;
				case 0xa:
					mhz = 40;
					break;
				case 0xb:
					mhz = 33;
					break;
				case 0xc:
					mhz = 25;
					break;
				default:
					mhz = 1000 / (period * 4);
					break;
				}
			} else {
				mhz = 1000 / (period * 4);
			}
		} else {
			mhz = 0;
		}
		switch (flags & (DPARM_WIDE|DPARM_TQING)) {
		case DPARM_WIDE:
			wt = ", 16 bit wide\n";
			break;
		case DPARM_TQING:
			wt = ", Tagged Queueing Enabled\n";
			break;
		case DPARM_WIDE|DPARM_TQING:
			wt = ", 16 bit wide, Tagged Queueing Enabled\n";
			break;
		default:
			wt = "\n";
			break;
		}
		if (mhz) {
			CFGPRINTF("%s: Bus %d Target %d at %dMHz Max "
			    "Offset %d%s", isp->isp_name, bus, tgt, mhz,
			    sdp->isp_devparam[tgt].cur_offset, wt);
		} else {
			CFGPRINTF("%s: Bus %d Target %d Async Mode%s",
			    isp->isp_name, bus, tgt, wt);
		}
		break;
	}
	case ISPASYNC_BUS_RESET:
		if (arg)
			bus = *((int *) arg);
		else
			bus = 0;
		printf("%s: SCSI bus %d reset detected\n", isp->isp_name, bus);
		break;
	case ISPASYNC_LOOP_DOWN:
		/*
		 * Hopefully we get here in time to minimize the number
		 * of commands we are firing off that are sure to die.
		 */
		isp->isp_osinfo.blocked = 1;
		printf("%s: Loop DOWN\n", isp->isp_name);
		break;
	case ISPASYNC_LOOP_UP:
		isp->isp_osinfo.blocked = 0;
		callout_reset(&isp->isp_osinfo._restart, 1,
		    isp_internal_restart, isp);
		printf("%s: Loop UP\n", isp->isp_name);
		break;
	case ISPASYNC_PDB_CHANGED:
	if (IS_FC(isp) && isp->isp_dblev) {
		const char *fmt = "%s: Target %d (Loop 0x%x) Port ID 0x%x "
		    "role %s %s\n Port WWN 0x%08x%08x\n Node WWN 0x%08x%08x\n";
		const static char *roles[4] = {
		    "No", "Target", "Initiator", "Target/Initiator"
		};
		char *ptr;
		fcparam *fcp = isp->isp_param;
		int tgt = *((int *) arg);
		struct lportdb *lp = &fcp->portdb[tgt];

		if (lp->valid) {
			ptr = "arrived";
		} else {
			ptr = "disappeared";
		}
		printf(fmt, isp->isp_name, tgt, lp->loopid, lp->portid,
		    roles[lp->roles & 0x3], ptr,
		    (u_int32_t) (lp->port_wwn >> 32),
		    (u_int32_t) (lp->port_wwn & 0xffffffffLL),
		    (u_int32_t) (lp->node_wwn >> 32),
		    (u_int32_t) (lp->node_wwn & 0xffffffffLL));
		break;
	}
#ifdef	ISP2100_FABRIC
	case ISPASYNC_CHANGE_NOTIFY:
		printf("%s: Name Server Database Changed\n", isp->isp_name);
		break;
	case ISPASYNC_FABRIC_DEV:
	{
		int target;
		struct lportdb *lp;
		sns_scrsp_t *resp = (sns_scrsp_t *) arg;
		u_int32_t portid;
		u_int64_t wwn;
		fcparam *fcp = isp->isp_param;

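		/*
		 * Assemble the 24-bit port ID and the 64-bit port WWN from
		 * the (big-endian) byte arrays in the SNS response.
		 */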
		portid =
		    (((u_int32_t) resp->snscb_port_id[0]) << 16) |
		    (((u_int32_t) resp->snscb_port_id[1]) << 8) |
		    (((u_int32_t) resp->snscb_port_id[2]));
		wwn =
		    (((u_int64_t)resp->snscb_portname[0]) << 56) |
		    (((u_int64_t)resp->snscb_portname[1]) << 48) |
		    (((u_int64_t)resp->snscb_portname[2]) << 40) |
		    (((u_int64_t)resp->snscb_portname[3]) << 32) |
		    (((u_int64_t)resp->snscb_portname[4]) << 24) |
		    (((u_int64_t)resp->snscb_portname[5]) << 16) |
		    (((u_int64_t)resp->snscb_portname[6]) <<  8) |
		    (((u_int64_t)resp->snscb_portname[7]));
		printf("%s: Fabric Device (Type 0x%x)@PortID 0x%x WWN "
		    "0x%08x%08x\n", isp->isp_name, resp->snscb_port_type,
		    portid, ((u_int32_t)(wwn >> 32)),
		    ((u_int32_t)(wwn & 0xffffffff)));
		if (resp->snscb_port_type != 2)
			break;
		for (target = FC_SNS_ID+1; target < MAX_FC_TARG; target++) {
			lp = &fcp->portdb[target];
			if (lp->port_wwn == wwn)
				break;
		}
		if (target < MAX_FC_TARG) {
			break;
		}
		for (target = FC_SNS_ID+1; target < MAX_FC_TARG; target++) {
			lp = &fcp->portdb[target];
			if (lp->port_wwn == 0)
				break;
		}
		if (target == MAX_FC_TARG) {
			printf("%s: no more space for fabric devices\n",
			    isp->isp_name);
			return (-1);
		}
		lp->port_wwn = lp->node_wwn = wwn;
		lp->portid = portid;
		break;
	}
#endif
	default:
		break;
	}
	(void) splx(s);
	return (0);
}