xref: /netbsd-src/sys/dev/ic/isp_netbsd.c (revision 5aefcfdc06931dd97e76246d2fe0302f7b3fe094)
1 /* $NetBSD: isp_netbsd.c,v 1.37 2000/12/28 22:27:47 mjacob Exp $ */
2 /*
3  * This driver, which is contained in NetBSD in the files:
4  *
5  *	sys/dev/ic/isp.c
6  *	sys/dev/ic/isp_inline.h
7  *	sys/dev/ic/isp_netbsd.c
8  *	sys/dev/ic/isp_netbsd.h
9  *	sys/dev/ic/isp_target.c
10  *	sys/dev/ic/isp_target.h
11  *	sys/dev/ic/isp_tpublic.h
12  *	sys/dev/ic/ispmbox.h
13  *	sys/dev/ic/ispreg.h
14  *	sys/dev/ic/ispvar.h
15  *	sys/microcode/isp/asm_sbus.h
16  *	sys/microcode/isp/asm_1040.h
17  *	sys/microcode/isp/asm_1080.h
18  *	sys/microcode/isp/asm_12160.h
19  *	sys/microcode/isp/asm_2100.h
20  *	sys/microcode/isp/asm_2200.h
21  *	sys/pci/isp_pci.c
22  *	sys/sbus/isp_sbus.c
23  *
24  * Is being actively maintained by Matthew Jacob (mjacob@netbsd.org).
25  * This driver also is shared source with FreeBSD, OpenBSD, Linux, Solaris,
26  * Linux versions. This tends to be an interesting maintenance problem.
27  *
28  * Please coordinate with Matthew Jacob on changes you wish to make here.
29  */
30 /*
31  * Platform (NetBSD) dependent common attachment code for Qlogic adapters.
32  * Matthew Jacob <mjacob@nas.nasa.gov>
33  */
34 /*
35  * Copyright (C) 1997, 1998, 1999 National Aeronautics & Space Administration
36  * All rights reserved.
37  *
38  * Redistribution and use in source and binary forms, with or without
39  * modification, are permitted provided that the following conditions
40  * are met:
41  * 1. Redistributions of source code must retain the above copyright
42  *    notice, this list of conditions and the following disclaimer.
43  * 2. Redistributions in binary form must reproduce the above copyright
44  *    notice, this list of conditions and the following disclaimer in the
45  *    documentation and/or other materials provided with the distribution.
46  * 3. The name of the author may not be used to endorse or promote products
47  *    derived from this software without specific prior written permission
48  *
49  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
50  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
51  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
52  * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
53  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
54  * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
55  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
56  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
57  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
58  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
59  */
60 
61 #include <dev/ic/isp_netbsd.h>
62 #include <sys/scsiio.h>
63 
64 
65 /*
66  * Set a timeout for the watchdogging of a command.
67  *
68  * The dimensional analysis is
69  *
70  *	milliseconds * (seconds/millisecond) * (ticks/second) = ticks
71  *
72  *			=
73  *
74  *	(milliseconds / 1000) * hz = ticks
75  *
76  *
77  * For timeouts less than 1 second, we'll get zero. Because of this, and
78  * because we want to establish *our* timeout to be longer than what the
79  * firmware might do, we just add 3 seconds at the back end.
80  */
81 #define	_XT(xs)	((((xs)->timeout/1000) * hz) + (3 * hz))
82 
83 static void ispminphys __P((struct buf *));
84 static int32_t ispcmd __P((XS_T *));
85 static int
86 ispioctl __P((struct scsipi_link *, u_long, caddr_t, int, struct proc *));
87 
88 static struct scsipi_device isp_dev = { NULL, NULL, NULL, NULL };
89 static int isp_polled_cmd __P((struct ispsoftc *, XS_T *));
90 static void isp_dog __P((void *));
91 static void isp_command_requeue __P((void *));
92 static void isp_internal_restart __P((void *));
93 
94 /*
95  * Complete attachment of hardware, include subdevices.
96  */
97 void
98 isp_attach(isp)
99 	struct ispsoftc *isp;
100 {
101 	isp->isp_osinfo._adapter.scsipi_minphys = ispminphys;
102 	isp->isp_osinfo._adapter.scsipi_ioctl = ispioctl;
103 	isp->isp_osinfo._adapter.scsipi_cmd = ispcmd;
104 
105 	isp->isp_state = ISP_RUNSTATE;
106 	isp->isp_osinfo._link.scsipi_scsi.channel =
107 	    (IS_DUALBUS(isp))? 0 : SCSI_CHANNEL_ONLY_ONE;
108 	isp->isp_osinfo._link.adapter_softc = isp;
109 	isp->isp_osinfo._link.device = &isp_dev;
110 	isp->isp_osinfo._link.adapter = &isp->isp_osinfo._adapter;
111 	isp->isp_osinfo._link.openings = isp->isp_maxcmds;
112 	/*
113 	 * Until the midlayer is fixed to use REPORT LUNS, limit to 8 luns.
114 	 */
115 	isp->isp_osinfo._link.scsipi_scsi.max_lun =
116 	   (isp->isp_maxluns < 7)? isp->isp_maxluns - 1 : 7;
117 	TAILQ_INIT(&isp->isp_osinfo.waitq);	/* The 2nd bus will share.. */
118 
119 	if (IS_FC(isp)) {
120 		isp->isp_osinfo._link.scsipi_scsi.max_target = MAX_FC_TARG-1;
121 	} else {
122 		sdparam *sdp = isp->isp_param;
123 		isp->isp_osinfo._link.scsipi_scsi.max_target = MAX_TARGETS-1;
124 		isp->isp_osinfo._link.scsipi_scsi.adapter_target =
125 		    sdp->isp_initiator_id;
126 		isp->isp_osinfo.discovered[0] = 1 << sdp->isp_initiator_id;
127 		if (IS_DUALBUS(isp)) {
128 			isp->isp_osinfo._link_b = isp->isp_osinfo._link;
129 			sdp++;
130 			isp->isp_osinfo.discovered[1] =
131 			    1 << sdp->isp_initiator_id;
132 			isp->isp_osinfo._link_b.scsipi_scsi.adapter_target =
133 			    sdp->isp_initiator_id;
134 			isp->isp_osinfo._link_b.scsipi_scsi.channel = 1;
135 			isp->isp_osinfo._link_b.scsipi_scsi.max_lun =
136 			    isp->isp_osinfo._link.scsipi_scsi.max_lun;
137 		}
138 	}
139 	isp->isp_osinfo._link.type = BUS_SCSI;
140 
141 	/*
142 	 * Send a SCSI Bus Reset.
143 	 */
144 	if (IS_SCSI(isp)) {
145 		int bus = 0;
146 		ISP_LOCK(isp);
147 		(void) isp_control(isp, ISPCTL_RESET_BUS, &bus);
148 		if (IS_DUALBUS(isp)) {
149 			bus++;
150 			(void) isp_control(isp, ISPCTL_RESET_BUS, &bus);
151 		}
152 		ISP_UNLOCK(isp);
153 	} else {
154 		int defid;
155 		fcparam *fcp = isp->isp_param;
156 		delay(2 * 1000000);
157 		defid = MAX_FC_TARG;
158 		ISP_LOCK(isp);
159 		/*
160 		 * We probably won't have clock interrupts running,
161 		 * so we'll be really short (smoke test, really)
162 		 * at this time.
163 		 */
164 		if (isp_control(isp, ISPCTL_FCLINK_TEST, NULL)) {
165 			(void) isp_control(isp, ISPCTL_PDB_SYNC, NULL);
166 			if (fcp->isp_fwstate == FW_READY &&
167 			    fcp->isp_loopstate >= LOOP_PDB_RCVD) {
168 				defid = fcp->isp_loopid;
169 			}
170 		}
171 		ISP_UNLOCK(isp);
172 		isp->isp_osinfo._link.scsipi_scsi.adapter_target = defid;
173 	}
174 
175 	/*
176 	 * After this point, we'll be doing the new configuration
177 	 * schema which allows interrups, so we can do tsleep/wakeup
178 	 * for mailbox stuff at that point.
179 	 */
180 	isp->isp_osinfo.no_mbox_ints = 0;
181 
182 	/*
183 	 * And attach children (if any).
184 	 */
185 	config_found((void *)isp, &isp->isp_osinfo._link, scsiprint);
186 	if (IS_DUALBUS(isp)) {
187 		config_found((void *)isp, &isp->isp_osinfo._link_b, scsiprint);
188 	}
189 }
190 
191 /*
192  * minphys our xfers
193  *
194  * Unfortunately, the buffer pointer describes the target device- not the
195  * adapter device, so we can't use the pointer to find out what kind of
196  * adapter we are and adjust accordingly.
197  */
198 
199 static void
200 ispminphys(bp)
201 	struct buf *bp;
202 {
203 	/*
204 	 * XX: Only the 1020 has a 24 bit limit.
205 	 */
206 	if (bp->b_bcount >= (1 << 24)) {
207 		bp->b_bcount = (1 << 24);
208 	}
209 	minphys(bp);
210 }
211 
212 static int
213 ispioctl(sc_link, cmd, addr, flag, p)
214 	struct scsipi_link *sc_link;
215 	u_long cmd;
216 	caddr_t addr;
217 	int flag;
218 	struct proc *p;
219 {
220 	struct ispsoftc *isp = sc_link->adapter_softc;
221 	int s, chan, retval = ENOTTY;
222 
223 	chan = (sc_link->scsipi_scsi.channel == SCSI_CHANNEL_ONLY_ONE)? 0 :
224 	    sc_link->scsipi_scsi.channel;
225 
226 	switch (cmd) {
227 	case SCBUSACCEL:
228 	{
229 		struct scbusaccel_args *sp = (struct scbusaccel_args *)addr;
230 		if (IS_SCSI(isp) && sp->sa_lun == 0) {
231 			int dflags = 0;
232 			sdparam *sdp = SDPARAM(isp);
233 
234 			sdp += chan;
235 			if (sp->sa_flags & SC_ACCEL_TAGS)
236 				dflags |= DPARM_TQING;
237 			if (sp->sa_flags & SC_ACCEL_WIDE)
238 				dflags |= DPARM_WIDE;
239 			if (sp->sa_flags & SC_ACCEL_SYNC)
240 				dflags |= DPARM_SYNC;
241 			s = splbio();
242 			sdp->isp_devparam[sp->sa_target].dev_flags |= dflags;
243 			dflags = sdp->isp_devparam[sp->sa_target].dev_flags;
244 			sdp->isp_devparam[sp->sa_target].dev_update = 1;
245 			isp->isp_update |= (1 << chan);
246 			splx(s);
247 			isp_prt(isp, ISP_LOGDEBUG1,
248 			    "ispioctl: device flags 0x%x for %d.%d.X",
249 			    dflags, chan, sp->sa_target);
250 		}
251 		retval = 0;
252 		break;
253 	}
254 	case SCBUSIORESET:
255 		s = splbio();
256 		if (isp_control(isp, ISPCTL_RESET_BUS, &chan))
257 			retval = EIO;
258 		else
259 			retval = 0;
260 		(void) splx(s);
261 		break;
262 	default:
263 		break;
264 	}
265 	return (retval);
266 }
267 
268 
269 static int32_t
270 ispcmd(xs)
271 	XS_T *xs;
272 {
273 	struct ispsoftc *isp;
274 	int result, s;
275 
276 	isp = XS_ISP(xs);
277 	s = splbio();
278 	if (isp->isp_state < ISP_RUNSTATE) {
279 		DISABLE_INTS(isp);
280 		isp_init(isp);
281                 if (isp->isp_state != ISP_INITSTATE) {
282 			ENABLE_INTS(isp);
283                         (void) splx(s);
284                         XS_SETERR(xs, HBA_BOTCH);
285                         return (COMPLETE);
286                 }
287                 isp->isp_state = ISP_RUNSTATE;
288 		ENABLE_INTS(isp);
289         }
290 
291 	/*
292 	 * Check for queue blockage...
293 	 */
294 	if (isp->isp_osinfo.blocked) {
295 		if (xs->xs_control & XS_CTL_POLL) {
296 			xs->error = XS_DRIVER_STUFFUP;
297 			splx(s);
298 			return (TRY_AGAIN_LATER);
299 		}
300 		TAILQ_INSERT_TAIL(&isp->isp_osinfo.waitq, xs, adapter_q);
301 		splx(s);
302 		return (SUCCESSFULLY_QUEUED);
303 	}
304 
305 	if (xs->xs_control & XS_CTL_POLL) {
306 		volatile u_int8_t ombi = isp->isp_osinfo.no_mbox_ints;
307 		isp->isp_osinfo.no_mbox_ints = 1;
308 		result = isp_polled_cmd(isp, xs);
309 		isp->isp_osinfo.no_mbox_ints = ombi;
310 		(void) splx(s);
311 		return (result);
312 	}
313 
314 	result = isp_start(xs);
315 #if	0
316 {
317 	static int na[16] = { 0 };
318 	if (na[isp->isp_unit] < isp->isp_nactive) {
319 		isp_prt(isp, ISP_LOGALL, "active hiwater %d", isp->isp_nactive);
320 		na[isp->isp_unit] = isp->isp_nactive;
321 	}
322 }
323 #endif
324 	switch (result) {
325 	case CMD_QUEUED:
326 		result = SUCCESSFULLY_QUEUED;
327 		if (xs->timeout) {
328 			callout_reset(&xs->xs_callout, _XT(xs), isp_dog, xs);
329 		}
330 		break;
331 	case CMD_EAGAIN:
332 		result = TRY_AGAIN_LATER;
333 		break;
334 	case CMD_RQLATER:
335 		result = SUCCESSFULLY_QUEUED;
336 		callout_reset(&xs->xs_callout, hz, isp_command_requeue, xs);
337 		break;
338 	case CMD_COMPLETE:
339 		result = COMPLETE;
340 		break;
341 	}
342 	(void) splx(s);
343 	return (result);
344 }
345 
346 static int
347 isp_polled_cmd(isp, xs)
348 	struct ispsoftc *isp;
349 	XS_T *xs;
350 {
351 	int result;
352 	int infinite = 0, mswait;
353 
354 	result = isp_start(xs);
355 
356 	switch (result) {
357 	case CMD_QUEUED:
358 		result = SUCCESSFULLY_QUEUED;
359 		break;
360 	case CMD_RQLATER:
361 	case CMD_EAGAIN:
362 		if (XS_NOERR(xs)) {
363 			xs->error = XS_DRIVER_STUFFUP;
364 		}
365 		result = TRY_AGAIN_LATER;
366 		break;
367 	case CMD_COMPLETE:
368 		result = COMPLETE;
369 		break;
370 
371 	}
372 
373 	if (result != SUCCESSFULLY_QUEUED) {
374 		return (result);
375 	}
376 
377 	/*
378 	 * If we can't use interrupts, poll on completion.
379 	 */
380 	if ((mswait = XS_TIME(xs)) == 0)
381 		infinite = 1;
382 
383 	while (mswait || infinite) {
384 		if (isp_intr((void *)isp)) {
385 			if (XS_CMD_DONE_P(xs)) {
386 				break;
387 			}
388 		}
389 		USEC_DELAY(1000);
390 		mswait -= 1;
391 	}
392 
393 	/*
394 	 * If no other error occurred but we didn't finish,
395 	 * something bad happened.
396 	 */
397 	if (XS_CMD_DONE_P(xs) == 0) {
398 		if (isp_control(isp, ISPCTL_ABORT_CMD, xs)) {
399 			isp_reinit(isp);
400 		}
401 		if (XS_NOERR(xs)) {
402 			XS_SETERR(xs, HBA_BOTCH);
403 		}
404 	}
405 	result = COMPLETE;
406 	return (result);
407 }
408 
409 void
410 isp_done(xs)
411 	XS_T *xs;
412 {
413 	XS_CMD_S_DONE(xs);
414 	if (XS_CMD_WDOG_P(xs) == 0) {
415 		struct ispsoftc *isp = XS_ISP(xs);
416 		callout_stop(&xs->xs_callout);
417 		if (XS_CMD_GRACE_P(xs)) {
418 			isp_prt(isp, ISP_LOGDEBUG1,
419 			    "finished command on borrowed time");
420 		}
421 		XS_CMD_S_CLEAR(xs);
422 		scsipi_done(xs);
423 	}
424 }
425 
426 static void
427 isp_dog(arg)
428 	void *arg;
429 {
430 	XS_T *xs = arg;
431 	struct ispsoftc *isp = XS_ISP(xs);
432 	u_int32_t handle;
433 
434 	ISP_ILOCK(isp);
435 	/*
436 	 * We've decided this command is dead. Make sure we're not trying
437 	 * to kill a command that's already dead by getting it's handle and
438 	 * and seeing whether it's still alive.
439 	 */
440 	handle = isp_find_handle(isp, xs);
441 	if (handle) {
442 		u_int16_t r, r1, i;
443 
444 		if (XS_CMD_DONE_P(xs)) {
445 			isp_prt(isp, ISP_LOGDEBUG1,
446 			    "watchdog found done cmd (handle 0x%x)", handle);
447 			ISP_IUNLOCK(isp);
448 			return;
449 		}
450 
451 		if (XS_CMD_WDOG_P(xs)) {
452 			isp_prt(isp, ISP_LOGDEBUG1,
453 			    "recursive watchdog (handle 0x%x)", handle);
454 			ISP_IUNLOCK(isp);
455 			return;
456 		}
457 
458 		XS_CMD_S_WDOG(xs);
459 
460 		i = 0;
461 		do {
462 			r = ISP_READ(isp, BIU_ISR);
463 			USEC_DELAY(1);
464 			r1 = ISP_READ(isp, BIU_ISR);
465 		} while (r != r1 && ++i < 1000);
466 
467 		if (INT_PENDING(isp, r) && isp_intr(isp) && XS_CMD_DONE_P(xs)) {
468 			isp_prt(isp, ISP_LOGDEBUG1, "watchdog cleanup (%x, %x)",
469 			    handle, r);
470 			XS_CMD_C_WDOG(xs);
471 			isp_done(xs);
472 		} else if (XS_CMD_GRACE_P(xs)) {
473 			isp_prt(isp, ISP_LOGDEBUG1, "watchdog timeout (%x, %x)",
474 			    handle, r);
475 			/*
476 			 * Make sure the command is *really* dead before we
477 			 * release the handle (and DMA resources) for reuse.
478 			 */
479 			(void) isp_control(isp, ISPCTL_ABORT_CMD, arg);
480 
481 			/*
482 			 * After this point, the comamnd is really dead.
483 			 */
484 			if (XS_XFRLEN(xs)) {
485 				ISP_DMAFREE(isp, xs, handle);
486 			}
487 			isp_destroy_handle(isp, handle);
488 			XS_SETERR(xs, XS_TIMEOUT);
489 			XS_CMD_S_CLEAR(xs);
490 			isp_done(xs);
491 		} else {
492 			u_int16_t iptr, optr;
493 			ispreq_t *mp;
494 			isp_prt(isp, ISP_LOGDEBUG2,
495 			    "possible command timeout (%x, %x)", handle, r);
496 			XS_CMD_C_WDOG(xs);
497 			callout_reset(&xs->xs_callout, hz, isp_dog, xs);
498 			if (isp_getrqentry(isp, &iptr, &optr, (void **) &mp)) {
499 				ISP_UNLOCK(isp);
500 				return;
501 			}
502 			XS_CMD_S_GRACE(xs);
503 			MEMZERO((void *) mp, sizeof (*mp));
504 			mp->req_header.rqs_entry_count = 1;
505 			mp->req_header.rqs_entry_type = RQSTYPE_MARKER;
506 			mp->req_modifier = SYNC_ALL;
507 			mp->req_target = XS_CHANNEL(xs) << 7;
508 			ISP_SWIZZLE_REQUEST(isp, mp);
509 			ISP_ADD_REQUEST(isp, iptr);
510 		}
511 	} else {
512 		isp_prt(isp, ISP_LOGDEBUG0, "watchdog with no command");
513 	}
514 	ISP_IUNLOCK(isp);
515 }
516 
517 /*
518  * Free any associated resources prior to decommissioning and
519  * set the card to a known state (so it doesn't wake up and kick
520  * us when we aren't expecting it to).
521  *
522  * Locks are held before coming here.
523  */
524 void
525 isp_uninit(isp)
526 	struct ispsoftc *isp;
527 {
528 	isp_lock(isp);
529 	/*
530 	 * Leave with interrupts disabled.
531 	 */
532 	DISABLE_INTS(isp);
533 	isp_unlock(isp);
534 }
535 
536 /*
537  * Restart function for a command to be requeued later.
538  */
539 static void
540 isp_command_requeue(arg)
541 	void *arg;
542 {
543 	struct scsipi_xfer *xs = arg;
544 	struct ispsoftc *isp = XS_ISP(xs);
545 	ISP_ILOCK(isp);
546 	switch (ispcmd(xs)) {
547 	case SUCCESSFULLY_QUEUED:
548 		isp_prt(isp, ISP_LOGINFO,
549 		    "requeued commands for %d.%d", XS_TGT(xs), XS_LUN(xs));
550 		if (xs->timeout) {
551 			callout_reset(&xs->xs_callout, _XT(xs), isp_dog, xs);
552 		}
553 		break;
554 	case TRY_AGAIN_LATER:
555 		isp_prt(isp, ISP_LOGINFO,
556 		    "EAGAIN on requeue for %d.%d", XS_TGT(xs), XS_LUN(xs));
557 		callout_reset(&xs->xs_callout, hz, isp_command_requeue, xs);
558 		break;
559 	case COMPLETE:
560 		/* can only be an error */
561 		XS_CMD_S_DONE(xs);
562 		callout_stop(&xs->xs_callout);
563 		if (XS_NOERR(xs)) {
564 			XS_SETERR(xs, HBA_BOTCH);
565 		}
566 		scsipi_done(xs);
567 		break;
568 	}
569 	ISP_IUNLOCK(isp);
570 }
571 
572 /*
573  * Restart function after a LOOP UP event (e.g.),
574  * done as a timeout for some hysteresis.
575  */
576 static void
577 isp_internal_restart(arg)
578 	void *arg;
579 {
580 	struct ispsoftc *isp = arg;
581 	int result, nrestarted = 0;
582 
583 	ISP_ILOCK(isp);
584 	if (isp->isp_osinfo.blocked == 0) {
585 		struct scsipi_xfer *xs;
586 		while ((xs = TAILQ_FIRST(&isp->isp_osinfo.waitq)) != NULL) {
587 			TAILQ_REMOVE(&isp->isp_osinfo.waitq, xs, adapter_q);
588 			result = isp_start(xs);
589 			if (result != CMD_QUEUED) {
590 				isp_prt(isp, ISP_LOGERR,
591 				    "botched command restart (err=%d)", result);
592 				XS_CMD_S_DONE(xs);
593 				if (xs->error == XS_NOERROR)
594 					xs->error = XS_DRIVER_STUFFUP;
595 				callout_stop(&xs->xs_callout);
596 				scsipi_done(xs);
597 			} else if (xs->timeout) {
598 				callout_reset(&xs->xs_callout,
599 				    _XT(xs), isp_dog, xs);
600 			}
601 			nrestarted++;
602 		}
603 		isp_prt(isp, ISP_LOGINFO,
604 		    "isp_restart requeued %d commands", nrestarted);
605 	}
606 	ISP_IUNLOCK(isp);
607 }
608 
609 int
610 isp_async(isp, cmd, arg)
611 	struct ispsoftc *isp;
612 	ispasync_t cmd;
613 	void *arg;
614 {
615 	int bus, tgt;
616 	int s = splbio();
617 	switch (cmd) {
618 	case ISPASYNC_NEW_TGT_PARAMS:
619 	if (IS_SCSI(isp) && isp->isp_dblev) {
620 		sdparam *sdp = isp->isp_param;
621 		char *wt;
622 		int mhz, flags, period;
623 
624 		tgt = *((int *) arg);
625 		bus = (tgt >> 16) & 0xffff;
626 		tgt &= 0xffff;
627 		sdp += bus;
628 		flags = sdp->isp_devparam[tgt].cur_dflags;
629 		period = sdp->isp_devparam[tgt].cur_period;
630 
631 		if ((flags & DPARM_SYNC) && period &&
632 		    (sdp->isp_devparam[tgt].cur_offset) != 0) {
633 			/*
634 			 * There's some ambiguity about our negotiated speed
635 			 * if we haven't detected LVD mode correctly (which
636 			 * seems to happen, unfortunately). If we're in LVD
637 			 * mode, then different rules apply about speed.
638 			 */
639 			if (sdp->isp_lvdmode || period < 0xc) {
640 				switch (period) {
641 				case 0x9:
642 					mhz = 80;
643 					break;
644 				case 0xa:
645 					mhz = 40;
646 					break;
647 				case 0xb:
648 					mhz = 33;
649 					break;
650 				case 0xc:
651 					mhz = 25;
652 					break;
653 				default:
654 					mhz = 1000 / (period * 4);
655 					break;
656 				}
657 			} else {
658 				mhz = 1000 / (period * 4);
659 			}
660 		} else {
661 			mhz = 0;
662 		}
663 		switch (flags & (DPARM_WIDE|DPARM_TQING)) {
664 		case DPARM_WIDE:
665 			wt = ", 16 bit wide";
666 			break;
667 		case DPARM_TQING:
668 			wt = ", Tagged Queueing Enabled";
669 			break;
670 		case DPARM_WIDE|DPARM_TQING:
671 			wt = ", 16 bit wide, Tagged Queueing Enabled";
672 			break;
673 		default:
674 			wt = " ";
675 			break;
676 		}
677 		if (mhz) {
678 			isp_prt(isp, ISP_LOGINFO,
679 			    "Bus %d Target %d at %dMHz Max Offset %d%s",
680 			    bus, tgt, mhz, sdp->isp_devparam[tgt].cur_offset,
681 			    wt);
682 		} else {
683 			isp_prt(isp, ISP_LOGINFO,
684 			    "Bus %d Target %d Async Mode%s", bus, tgt, wt);
685 		}
686 		break;
687 	}
688 	case ISPASYNC_BUS_RESET:
689 		if (arg)
690 			bus = *((int *) arg);
691 		else
692 			bus = 0;
693 		isp_prt(isp, ISP_LOGINFO, "SCSI bus %d reset detected", bus);
694 		break;
695 	case ISPASYNC_LOOP_DOWN:
696 		/*
697 		 * Hopefully we get here in time to minimize the number
698 		 * of commands we are firing off that are sure to die.
699 		 */
700 		isp->isp_osinfo.blocked = 1;
701 		isp_prt(isp, ISP_LOGINFO, "Loop DOWN");
702 		break;
703         case ISPASYNC_LOOP_UP:
704 		isp->isp_osinfo.blocked = 0;
705 		callout_reset(&isp->isp_osinfo._restart, 1,
706 		    isp_internal_restart, isp);
707 		isp_prt(isp, ISP_LOGINFO, "Loop UP");
708 		break;
709 	case ISPASYNC_PDB_CHANGED:
710 	if (IS_FC(isp) && isp->isp_dblev) {
711 		const char fmt[] = "Target %d (Loop 0x%x) Port ID 0x%x "
712 		    "role %s %s\n Port WWN 0x%08x%08x\n Node WWN 0x%08x%08x";
713 		const static char *roles[4] = {
714 		    "No", "Target", "Initiator", "Target/Initiator"
715 		};
716 		char *ptr;
717 		fcparam *fcp = isp->isp_param;
718 		int tgt = *((int *) arg);
719 		struct lportdb *lp = &fcp->portdb[tgt];
720 
721 		if (lp->valid) {
722 			ptr = "arrived";
723 		} else {
724 			ptr = "disappeared";
725 		}
726 		isp_prt(isp, ISP_LOGINFO, fmt, tgt, lp->loopid, lp->portid,
727 		    roles[lp->roles & 0x3], ptr,
728 		    (u_int32_t) (lp->port_wwn >> 32),
729 		    (u_int32_t) (lp->port_wwn & 0xffffffffLL),
730 		    (u_int32_t) (lp->node_wwn >> 32),
731 		    (u_int32_t) (lp->node_wwn & 0xffffffffLL));
732 		break;
733 	}
734 #ifdef	ISP2100_FABRIC
735 	case ISPASYNC_CHANGE_NOTIFY:
736 		isp_prt(isp, ISP_LOGINFO, "Name Server Database Changed");
737 		break;
738 	case ISPASYNC_FABRIC_DEV:
739 	{
740 		int target;
741 		struct lportdb *lp;
742 		sns_scrsp_t *resp = (sns_scrsp_t *) arg;
743 		u_int32_t portid;
744 		u_int64_t wwn;
745 		fcparam *fcp = isp->isp_param;
746 
747 		portid =
748 		    (((u_int32_t) resp->snscb_port_id[0]) << 16) |
749 		    (((u_int32_t) resp->snscb_port_id[1]) << 8) |
750 		    (((u_int32_t) resp->snscb_port_id[2]));
751 		wwn =
752 		    (((u_int64_t)resp->snscb_portname[0]) << 56) |
753 		    (((u_int64_t)resp->snscb_portname[1]) << 48) |
754 		    (((u_int64_t)resp->snscb_portname[2]) << 40) |
755 		    (((u_int64_t)resp->snscb_portname[3]) << 32) |
756 		    (((u_int64_t)resp->snscb_portname[4]) << 24) |
757 		    (((u_int64_t)resp->snscb_portname[5]) << 16) |
758 		    (((u_int64_t)resp->snscb_portname[6]) <<  8) |
759 		    (((u_int64_t)resp->snscb_portname[7]));
760 
761 		isp_prt(isp, ISP_LOGINFO,
762 		    "Fabric Device (Type 0x%x)@PortID 0x%x WWN 0x%08x%08x",
763 		    resp->snscb_port_type, portid, ((u_int32_t)(wwn >> 32)),
764 		    ((u_int32_t)(wwn & 0xffffffff)));
765 
766 		for (target = FC_SNS_ID+1; target < MAX_FC_TARG; target++) {
767 			lp = &fcp->portdb[target];
768 			if (lp->port_wwn == wwn)
769 				break;
770 		}
771 		if (target < MAX_FC_TARG) {
772 			break;
773 		}
774 		for (target = FC_SNS_ID+1; target < MAX_FC_TARG; target++) {
775 			lp = &fcp->portdb[target];
776 			if (lp->port_wwn == 0)
777 				break;
778 		}
779 		if (target == MAX_FC_TARG) {
780 			isp_prt(isp, ISP_LOGWARN,
781 			    "no more space for fabric devices");
782 			return (-1);
783 		}
784 		lp->port_wwn = lp->node_wwn = wwn;
785 		lp->portid = portid;
786 		break;
787 	}
788 #endif
789 	default:
790 		break;
791 	}
792 	(void) splx(s);
793 	return (0);
794 }
795 
796 #include <machine/stdarg.h>
797 void
798 #ifdef	__STDC__
799 isp_prt(struct ispsoftc *isp, int level, const char *fmt, ...)
800 #else
801 isp_prt(isp, fmt, va_alist)
802 	struct ispsoftc *isp;
803 	char *fmt;
804 	va_dcl;
805 #endif
806 {
807 	va_list ap;
808 	if (level != ISP_LOGALL && (level & isp->isp_dblev) == 0) {
809 		return;
810 	}
811 	printf("%s: ", isp->isp_name);
812 	va_start(ap, fmt);
813 	vprintf(fmt, ap);
814 	va_end(ap);
815 	printf("\n");
816 }
817