1 /* $NetBSD: isp_netbsd.c,v 1.75 2007/07/09 21:00:36 ad Exp $ */
2 /*
3  * Platform (NetBSD) dependent common attachment code for Qlogic adapters.
4  */
5 /*
6  * Copyright (C) 1997, 1998, 1999 National Aeronautics & Space Administration
7  * All rights reserved.
8  *
9  * Additional Copyright (C) 2000-2007 by Matthew Jacob
10  * All rights reserved.
11  *
12  * Redistribution and use in source and binary forms, with or without
13  * modification, are permitted provided that the following conditions
14  * are met:
15  * 1. Redistributions of source code must retain the above copyright
16  *    notice, this list of conditions and the following disclaimer.
17  * 2. Redistributions in binary form must reproduce the above copyright
18  *    notice, this list of conditions and the following disclaimer in the
19  *    documentation and/or other materials provided with the distribution.
20  * 3. The name of the author may not be used to endorse or promote products
21  *    derived from this software without specific prior written permission
22  *
23  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
24  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
25  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
26  * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
27  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
28  * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
29  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
30  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
31  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
32  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
33  */
34 
35 #include <sys/cdefs.h>
36 __KERNEL_RCSID(0, "$NetBSD: isp_netbsd.c,v 1.75 2007/07/09 21:00:36 ad Exp $");
37 
38 #include <dev/ic/isp_netbsd.h>
39 #include <dev/ic/isp_ioctl.h>
40 #include <sys/scsiio.h>
41 
42 
43 /*
44  * Set a timeout for the watchdogging of a command.
45  *
46  * The dimensional analysis is
47  *
48  *	milliseconds * (seconds/millisecond) * (ticks/second) = ticks
49  *
50  *			=
51  *
52  *	(milliseconds / 1000) * hz = ticks
53  *
54  *
55  * For timeouts less than 1 second, we'll get zero. Because of this, and
56  * because we want to establish *our* timeout to be longer than what the
57  * firmware might do, we just add 3 seconds at the back end.
58  */
59 #define	_XT(xs)	((((xs)->timeout/1000) * hz) + (3 * hz))
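/*
 * Worked example (a sketch, assuming hz = 100): a 30000ms command timeout
 * becomes (30000 / 1000) * 100 + (3 * 100) = 3300 ticks, while a 500ms
 * timeout, whose first term truncates to zero, still gets the 3 second
 * pad: (500 / 1000) * 100 + (3 * 100) = 300 ticks.
 */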
60 
61 static void isp_config_interrupts(struct device *);
62 static void ispminphys_1020(struct buf *);
63 static void ispminphys(struct buf *);
64 static void ispcmd(struct ispsoftc *, XS_T *);
65 static void isprequest(struct scsipi_channel *, scsipi_adapter_req_t, void *);
66 static int
67 ispioctl(struct scsipi_channel *, u_long, void *, int, struct proc *);
68 
69 static void isp_polled_cmd_wait(struct ispsoftc *, XS_T *);
70 static void isp_dog(void *);
71 static void isp_gdt(void *);
72 static void isp_ldt(void *);
73 static void isp_make_here(ispsoftc_t *, int);
74 static void isp_make_gone(ispsoftc_t *, int);
75 static void isp_fc_worker(void *);
76 
77 static const char *roles[4] = {
78     "(none)", "Target", "Initiator", "Target/Initiator"
79 };
80 static const char prom3[] =
81     "PortID 0x%06x Departed from Target %u because of %s";
82 int isp_change_is_bad = 0;	/* "changed" devices are bad */
83 int isp_quickboot_time = 15;	/* don't wait more than N secs for loop up */
84 static int isp_fabric_hysteresis = 5;
85 #define	isp_change_is_bad	0
86 
87 
88 /*
89  * Complete attachment of hardware, including subdevices.
90  */
91 
92 void
93 isp_attach(struct ispsoftc *isp)
94 {
95 	isp->isp_state = ISP_RUNSTATE;
96 
97 	isp->isp_osinfo._adapter.adapt_dev = &isp->isp_osinfo._dev;
98 	isp->isp_osinfo._adapter.adapt_nchannels = IS_DUALBUS(isp) ? 2 : 1;
99 	isp->isp_osinfo._adapter.adapt_openings = isp->isp_maxcmds;
100 	/*
101 	 * It's not stated whether max_periph is limited by SPI
102 	 * tag usage, but let's assume that it is.
103 	 */
104 	isp->isp_osinfo._adapter.adapt_max_periph = min(isp->isp_maxcmds, 255);
105 	isp->isp_osinfo._adapter.adapt_ioctl = ispioctl;
106 	isp->isp_osinfo._adapter.adapt_request = isprequest;
107 	if (isp->isp_type <= ISP_HA_SCSI_1020A) {
108 		isp->isp_osinfo._adapter.adapt_minphys = ispminphys_1020;
109 	} else {
110 		isp->isp_osinfo._adapter.adapt_minphys = ispminphys;
111 	}
112 
113 	isp->isp_osinfo._chan.chan_adapter = &isp->isp_osinfo._adapter;
114 	isp->isp_osinfo._chan.chan_bustype = &scsi_bustype;
115 	isp->isp_osinfo._chan.chan_channel = 0;
116 
117 	/*
118 	 * Until the midlayer is fixed to use REPORT LUNS, limit to 8 luns.
119 	 */
120 	isp->isp_osinfo._chan.chan_nluns = min(isp->isp_maxluns, 8);
121 
122 	callout_init(&isp->isp_osinfo.gdt, 0);
123 	callout_setfunc(&isp->isp_osinfo.gdt, isp_gdt, isp);
124 
125 	callout_init(&isp->isp_osinfo.ldt, 0);
126 	callout_setfunc(&isp->isp_osinfo.ldt, isp_ldt, isp);
127 
128 	if (IS_FC(isp)) {
129 		isp->isp_osinfo._chan.chan_ntargets = MAX_FC_TARG;
130 		isp->isp_osinfo._chan.chan_id = MAX_FC_TARG;
131 #ifdef	ISP_FW_CRASH_DUMP
132 		if (IS_2200(isp)) {
133 			FCPARAM(isp)->isp_dump_data =
134 			    malloc(QLA2200_RISC_IMAGE_DUMP_SIZE, M_DEVBUF,
135 				M_NOWAIT);
136 		} else if (IS_23XX(isp)) {
137 			FCPARAM(isp)->isp_dump_data =
138 			    malloc(QLA2300_RISC_IMAGE_DUMP_SIZE, M_DEVBUF,
139 				M_NOWAIT);
140 		}
141 		if (FCPARAM(isp)->isp_dump_data)
142 			FCPARAM(isp)->isp_dump_data[0] = 0;
143 #endif
144 	} else {
145 		int bus = 0;
146 		sdparam *sdp = isp->isp_param;
147 
148 		isp->isp_osinfo._chan.chan_ntargets = MAX_TARGETS;
149 		isp->isp_osinfo._chan.chan_id = sdp->isp_initiator_id;
150 		isp->isp_osinfo.discovered[0] = 1 << sdp->isp_initiator_id;
151 		if (IS_DUALBUS(isp)) {
152 			isp->isp_osinfo._chan_b = isp->isp_osinfo._chan;
153 			sdp++;
154 			isp->isp_osinfo.discovered[1] =
155 			    1 << sdp->isp_initiator_id;
156 			isp->isp_osinfo._chan_b.chan_id = sdp->isp_initiator_id;
157 			isp->isp_osinfo._chan_b.chan_channel = 1;
158 		}
159 		ISP_LOCK(isp);
160 		(void) isp_control(isp, ISPCTL_RESET_BUS, &bus);
161 		if (IS_DUALBUS(isp)) {
162 			bus++;
163 			(void) isp_control(isp, ISPCTL_RESET_BUS, &bus);
164 		}
165 		ISP_UNLOCK(isp);
166 	}
167 
168 
169 	/*
170          * Defer enabling mailbox interrupts until later.
171          */
172         config_interrupts((struct device *) isp, isp_config_interrupts);
173 
174 	/*
175 	 * And attach children (if any).
176 	 */
177 	config_found((void *)isp, &isp->isp_chanA, scsiprint);
178 	if (IS_DUALBUS(isp)) {
179 		config_found((void *)isp, &isp->isp_chanB, scsiprint);
180 	}
181 }
182 
183 static void
184 isp_config_interrupts(struct device *self)
185 {
186         struct ispsoftc *isp = (struct ispsoftc *) self;
187 
188         isp->isp_osinfo.mbox_sleep_ok = 1;
189 
190 	if (kthread_create(PRI_NONE, 0, NULL, isp_fc_worker, isp,
191 	    &isp->isp_osinfo.thread, "%s:fc_thrd", isp->isp_name)) {
192 		isp_prt(isp, ISP_LOGERR,
193 		    "unable to create FC worker thread");
194 		panic("isp_config_interrupts");
195 	}
196 }
197 
198 
199 /*
200  * minphys our xfers
201  */
202 static void
203 ispminphys_1020(struct buf *bp)
204 {
205 	if (bp->b_bcount >= (1 << 24)) {
206 		bp->b_bcount = (1 << 24);
207 	}
208 	minphys(bp);
209 }
210 
211 static void
212 ispminphys(struct buf *bp)
213 {
214 	if (bp->b_bcount >= (1 << 30)) {
215 		bp->b_bcount = (1 << 30);
216 	}
217 	minphys(bp);
218 }
219 
220 static int
221 ispioctl(struct scsipi_channel *chan, u_long cmd, void *addr, int flag,
222 	struct proc *p)
223 {
224 	struct ispsoftc *isp = (void *)chan->chan_adapter->adapt_dev;
225 	int retval = ENOTTY;
226 
227 	switch (cmd) {
228 #ifdef	ISP_FW_CRASH_DUMP
229 	case ISP_GET_FW_CRASH_DUMP:
230 	{
231 		uint16_t *ptr = FCPARAM(isp)->isp_dump_data;
232 		size_t sz;
233 
234 		retval = 0;
235 		if (IS_2200(isp))
236 			sz = QLA2200_RISC_IMAGE_DUMP_SIZE;
237 		else
238 			sz = QLA2300_RISC_IMAGE_DUMP_SIZE;
239 		ISP_LOCK(isp);
240 		if (ptr && *ptr) {
241 			void *uaddr = *((void **) addr);
242 			if (copyout(ptr, uaddr, sz)) {
243 				retval = EFAULT;
244 			} else {
245 				*ptr = 0;
246 			}
247 		} else {
248 			retval = ENXIO;
249 		}
250 		ISP_UNLOCK(isp);
251 		break;
252 	}
253 
254 	case ISP_FORCE_CRASH_DUMP:
255 		ISP_LOCK(isp);
256 		if (isp->isp_osinfo.blocked == 0) {
257                         isp->isp_osinfo.blocked = 1;
258 			isp_prt(isp, ISP_LOGSANCFG|ISP_LOGDEBUG0,
259 			    "FREEZE QUEUES @ LINE %d", __LINE__);
260                         scsipi_channel_freeze(&isp->isp_chanA, 1);
261                 }
262 		isp_fw_dump(isp);
263 		isp_reinit(isp);
264 		ISP_UNLOCK(isp);
265 		retval = 0;
266 		break;
267 #endif
268 	case ISP_SDBLEV:
269 	{
270 		int olddblev = isp->isp_dblev;
271 		isp->isp_dblev = *(int *)addr;
272 		*(int *)addr = olddblev;
273 		retval = 0;
274 		break;
275 	}
276 	case ISP_RESETHBA:
277 		ISP_LOCK(isp);
278 		isp_reinit(isp);
279 		ISP_UNLOCK(isp);
280 		retval = 0;
281 		break;
282 	case ISP_RESCAN:
283 		if (IS_FC(isp)) {
284 			ISP_LOCK(isp);
285 			if (isp_fc_runstate(isp, 5 * 1000000)) {
286 				retval = EIO;
287 			} else {
288 				retval = 0;
289 			}
290 			ISP_UNLOCK(isp);
291 		}
292 		break;
293 	case ISP_FC_LIP:
294 		if (IS_FC(isp)) {
295 			ISP_LOCK(isp);
296 			if (isp_control(isp, ISPCTL_SEND_LIP, 0)) {
297 				retval = EIO;
298 			} else {
299 				retval = 0;
300 			}
301 			ISP_UNLOCK(isp);
302 		}
303 		break;
304 	case ISP_FC_GETDINFO:
305 	{
306 		struct isp_fc_device *ifc = (struct isp_fc_device *) addr;
307 		fcportdb_t *lp;
308 
309 		if (ifc->loopid >= MAX_FC_TARG) {
310 			retval = EINVAL;
311 			break;
312 		}
313 		ISP_LOCK(isp);
314 		lp = &FCPARAM(isp)->portdb[ifc->loopid];
315 		if (lp->state == FC_PORTDB_STATE_VALID) {
316 			ifc->role = lp->roles;
317 			ifc->loopid = lp->handle;
318 			ifc->portid = lp->portid;
319 			ifc->node_wwn = lp->node_wwn;
320 			ifc->port_wwn = lp->port_wwn;
321 			retval = 0;
322 		} else {
323 			retval = ENODEV;
324 		}
325 		ISP_UNLOCK(isp);
326 		break;
327 	}
328 	case ISP_GET_STATS:
329 	{
330 		isp_stats_t *sp = (isp_stats_t *) addr;
331 
332 		MEMZERO(sp, sizeof (*sp));
333 		sp->isp_stat_version = ISP_STATS_VERSION;
334 		sp->isp_type = isp->isp_type;
335 		sp->isp_revision = isp->isp_revision;
336 		ISP_LOCK(isp);
337 		sp->isp_stats[ISP_INTCNT] = isp->isp_intcnt;
338 		sp->isp_stats[ISP_INTBOGUS] = isp->isp_intbogus;
339 		sp->isp_stats[ISP_INTMBOXC] = isp->isp_intmboxc;
340 		sp->isp_stats[ISP_INGOASYNC] = isp->isp_intoasync;
341 		sp->isp_stats[ISP_RSLTCCMPLT] = isp->isp_rsltccmplt;
342 		sp->isp_stats[ISP_FPHCCMCPLT] = isp->isp_fphccmplt;
343 		sp->isp_stats[ISP_RSCCHIWAT] = isp->isp_rscchiwater;
344 		sp->isp_stats[ISP_FPCCHIWAT] = isp->isp_fpcchiwater;
345 		ISP_UNLOCK(isp);
346 		retval = 0;
347 		break;
348 	}
349 	case ISP_CLR_STATS:
350 		ISP_LOCK(isp);
351 		isp->isp_intcnt = 0;
352 		isp->isp_intbogus = 0;
353 		isp->isp_intmboxc = 0;
354 		isp->isp_intoasync = 0;
355 		isp->isp_rsltccmplt = 0;
356 		isp->isp_fphccmplt = 0;
357 		isp->isp_rscchiwater = 0;
358 		isp->isp_fpcchiwater = 0;
359 		ISP_UNLOCK(isp);
360 		retval = 0;
361 		break;
362 	case ISP_FC_GETHINFO:
363 	{
364 		struct isp_hba_device *hba = (struct isp_hba_device *) addr;
365 		MEMZERO(hba, sizeof (*hba));
366 		ISP_LOCK(isp);
367 		hba->fc_speed = FCPARAM(isp)->isp_gbspeed;
368 		hba->fc_scsi_supported = 1;
369 		hba->fc_topology = FCPARAM(isp)->isp_topo + 1;
370 		hba->fc_loopid = FCPARAM(isp)->isp_loopid;
371 		hba->nvram_node_wwn = FCPARAM(isp)->isp_wwnn_nvram;
372 		hba->nvram_port_wwn = FCPARAM(isp)->isp_wwpn_nvram;
373 		hba->active_node_wwn = ISP_NODEWWN(isp);
374 		hba->active_port_wwn = ISP_PORTWWN(isp);
375 		ISP_UNLOCK(isp);
376 		retval = 0;
377 		break;
378 	}
379 	case SCBUSIORESET:
380 		ISP_LOCK(isp);
381 		if (isp_control(isp, ISPCTL_RESET_BUS, &chan->chan_channel)) {
382 			retval = EIO;
383 		} else {
384 			retval = 0;
385 		}
386 		ISP_UNLOCK(isp);
387 		break;
388 	default:
389 		break;
390 	}
391 	return (retval);
392 }
393 
394 static void
395 ispcmd(struct ispsoftc *isp, XS_T *xs)
396 {
397 	volatile uint8_t ombi;
398 	int lim;
399 
400 	ISP_LOCK(isp);
401 	if (isp->isp_state < ISP_RUNSTATE) {
402 		ISP_DISABLE_INTS(isp);
403 		isp_init(isp);
404 		if (isp->isp_state != ISP_INITSTATE) {
405 			ISP_ENABLE_INTS(isp);
406 			ISP_UNLOCK(isp);
407 			isp_prt(isp, ISP_LOGERR, "isp not at init state");
408 			XS_SETERR(xs, HBA_BOTCH);
409 			scsipi_done(xs);
410 			return;
411 		}
412 		isp->isp_state = ISP_RUNSTATE;
413 		ISP_ENABLE_INTS(isp);
414 	}
415 
416 	/*
417 	 * Handle the case of a FC card where the FC thread hasn't
418 	 * fired up yet and we don't yet have a known loop state.
419 	 */
420 	if (IS_FC(isp) && (FCPARAM(isp)->isp_fwstate != FW_READY ||
421 	    FCPARAM(isp)->isp_loopstate != LOOP_READY) &&
422 	    isp->isp_osinfo.thread == NULL) {
423 		int delay_time;
424 		ombi = isp->isp_osinfo.mbox_sleep_ok != 0;
425 
426 		if (xs->xs_control & XS_CTL_POLL) {
427 			isp->isp_osinfo.mbox_sleep_ok = 0;
428 		}
429 
430 		if (isp->isp_osinfo.loop_checked == 0) {
431 			delay_time = 10 * 1000000;
432 			isp->isp_osinfo.loop_checked = 1;
433 		} else {
434 			delay_time = 250000;
435 		}
436 
437 		if (isp_fc_runstate(isp, delay_time) != 0) {
438 			if (xs->xs_control & XS_CTL_POLL) {
439 				isp->isp_osinfo.mbox_sleep_ok = ombi;
440 			}
441 			if (FCPARAM(isp)->loop_seen_once == 0) {
442 				XS_SETERR(xs, HBA_SELTIMEOUT);
443 				scsipi_done(xs);
444 				ISP_UNLOCK(isp);
445 				return;
446 			}
447 			/*
448 			 * Otherwise, fall thru to be queued up for later.
449 			 */
450 		} else {
451 			int wasblocked =
452 			    (isp->isp_osinfo.blocked || isp->isp_osinfo.paused);
453 			isp->isp_osinfo.blocked =
454 			    isp->isp_osinfo.paused = 0;
455 			if (wasblocked) {
456 				isp_prt(isp, ISP_LOGSANCFG|ISP_LOGDEBUG0,
457 				    "THAW QUEUES @ LINE %d", __LINE__);
458 				scsipi_channel_thaw(&isp->isp_chanA, 1);
459 			}
460 		}
461 		if (xs->xs_control & XS_CTL_POLL) {
462 			isp->isp_osinfo.mbox_sleep_ok = ombi;
463 		}
464 	}
465 
466 	if (isp->isp_osinfo.paused) {
467 		isp_prt(isp, ISP_LOGWARN, "I/O while paused");
468 		xs->error = XS_RESOURCE_SHORTAGE;
469 		scsipi_done(xs);
470 		ISP_UNLOCK(isp);
471 		return;
472 	}
473 	if (isp->isp_osinfo.blocked) {
474 		isp_prt(isp, ISP_LOGWARN, "I/O while blocked");
475 		xs->error = XS_REQUEUE;
476 		scsipi_done(xs);
477 		ISP_UNLOCK(isp);
478 		return;
479 	}
480 
481 	if (xs->xs_control & XS_CTL_POLL) {
482 		ombi = isp->isp_osinfo.mbox_sleep_ok;
483 		isp->isp_osinfo.mbox_sleep_ok = 0;
484 	}
485 
486 	switch (isp_start(xs)) {
487 	case CMD_QUEUED:
488 		if (xs->xs_control & XS_CTL_POLL) {
489 			isp_polled_cmd_wait(isp, xs);
490 			isp->isp_osinfo.mbox_sleep_ok = ombi;
491 		} else if (xs->timeout) {
492 			callout_reset(&xs->xs_callout, _XT(xs), isp_dog, xs);
493 		}
494 		break;
495 	case CMD_EAGAIN:
496 		isp->isp_osinfo.paused = 1;
497 		xs->error = XS_RESOURCE_SHORTAGE;
498 		isp_prt(isp, ISP_LOGSANCFG|ISP_LOGDEBUG0,
499 		    "FREEZE QUEUES @ LINE %d", __LINE__);
500 		scsipi_channel_freeze(&isp->isp_chanA, 1);
501 		if (IS_DUALBUS(isp)) {
502 			scsipi_channel_freeze(&isp->isp_chanB, 1);
503 		}
504 		scsipi_done(xs);
505 		break;
506 	case CMD_RQLATER:
507 		/*
508 		 * We can only get RQLATER from FC devices (1 channel only)
509 		 *
510 		 * If we've never seen loop up, see whether we've been down
511 		 * for quickboot time; otherwise use the loop down limit time.
512 		 * If we've been down that long, start giving up on commands.
513 		 */
514 		if (FCPARAM(isp)->loop_seen_once == 0) {
515 			lim = isp_quickboot_time;
516 		} else {
517 			lim = isp->isp_osinfo.loop_down_limit;
518 		}
519 		if (isp->isp_osinfo.loop_down_time >= lim) {
520 			isp_prt(isp, ISP_LOGSANCFG|ISP_LOGDEBUG0,
521 			    "RQLATER -> SELTIMEOUT");
522 			XS_SETERR(xs, HBA_SELTIMEOUT);
523 			scsipi_done(xs);
524 			break;
525 		}
526 		if (isp->isp_osinfo.blocked == 0) {
527 			isp->isp_osinfo.blocked = 1;
528 			isp_prt(isp, ISP_LOGSANCFG|ISP_LOGDEBUG0,
529 			    "FREEZE QUEUES @ LINE %d", __LINE__);
530 			scsipi_channel_freeze(&isp->isp_chanA, 1);
531 		} else {
532 			isp_prt(isp, ISP_LOGSANCFG|ISP_LOGDEBUG0,
533 			    "RQLATER WITH FROZEN QUEUES @ LINE %d", __LINE__);
534 		}
535 		xs->error = XS_REQUEUE;
536 		scsipi_done(xs);
537 		break;
538 	case CMD_COMPLETE:
539 		scsipi_done(xs);
540 		break;
541 	}
542 	ISP_UNLOCK(isp);
543 }
544 
545 static void
546 isprequest(struct scsipi_channel *chan, scsipi_adapter_req_t req, void *arg)
547 {
548 	struct ispsoftc *isp = (void *)chan->chan_adapter->adapt_dev;
549 
550 	switch (req) {
551 	case ADAPTER_REQ_RUN_XFER:
552 		ispcmd(isp, (XS_T *) arg);
553 		break;
554 
555 	case ADAPTER_REQ_GROW_RESOURCES:
556 		/* Not supported. */
557 		break;
558 
559 	case ADAPTER_REQ_SET_XFER_MODE:
560 	if (IS_SCSI(isp)) {
561 		struct scsipi_xfer_mode *xm = arg;
562 		int dflags = 0;
563 		sdparam *sdp = SDPARAM(isp);
564 
565 		sdp += chan->chan_channel;
566 		if (xm->xm_mode & PERIPH_CAP_TQING)
567 			dflags |= DPARM_TQING;
568 		if (xm->xm_mode & PERIPH_CAP_WIDE16)
569 			dflags |= DPARM_WIDE;
570 		if (xm->xm_mode & PERIPH_CAP_SYNC)
571 			dflags |= DPARM_SYNC;
572 		ISP_LOCK(isp);
573 		sdp->isp_devparam[xm->xm_target].goal_flags |= dflags;
574 		dflags = sdp->isp_devparam[xm->xm_target].goal_flags;
575 		sdp->isp_devparam[xm->xm_target].dev_update = 1;
576 		isp->isp_update |= (1 << chan->chan_channel);
577 		ISP_UNLOCK(isp);
578 		isp_prt(isp, ISP_LOGDEBUG1,
579 		    "isprequest: device flags 0x%x for %d.%d.X",
580 		    dflags, chan->chan_channel, xm->xm_target);
581 		break;
582 	}
583 	default:
584 		break;
585 	}
586 }
587 
588 static void
589 isp_polled_cmd_wait(struct ispsoftc *isp, XS_T *xs)
590 {
591 	int infinite = 0, mswait;
592 
593 	/*
594 	 * If we can't use interrupts, poll on completion.
595 	 */
596 	if ((mswait = XS_TIME(xs)) == 0) {
597 		infinite = 1;
598 	}
599 
600 	while (mswait || infinite) {
601 		uint32_t isr;
602 		uint16_t sema, mbox;
603 		if (ISP_READ_ISR(isp, &isr, &sema, &mbox)) {
604 			isp_intr(isp, isr, sema, mbox);
605 			if (XS_CMD_DONE_P(xs)) {
606 				break;
607 			}
608 		}
609 		USEC_DELAY(1000);
610 		mswait -= 1;
611 	}
612 
613 	/*
614 	 * If no other error occurred but we didn't finish,
615 	 * something bad happened, so abort the command.
616 	 */
617 	if (XS_CMD_DONE_P(xs) == 0) {
618 		if (isp_control(isp, ISPCTL_ABORT_CMD, xs)) {
619 			isp_reinit(isp);
620 		}
621 		if (XS_NOERR(xs)) {
622 			isp_prt(isp, ISP_LOGERR, "polled command timed out");
623 			XS_SETERR(xs, HBA_BOTCH);
624 		}
625 	}
626 	scsipi_done(xs);
627 }
628 
629 void
630 isp_done(XS_T *xs)
631 {
632 	if (XS_CMD_WDOG_P(xs) == 0) {
633 		struct ispsoftc *isp = XS_ISP(xs);
634 		callout_stop(&xs->xs_callout);
635 		if (XS_CMD_GRACE_P(xs)) {
636 			isp_prt(isp, ISP_LOGDEBUG1,
637 			    "finished command on borrowed time");
638 		}
639 		XS_CMD_S_CLEAR(xs);
640 		/*
641 		 * Fixup- if we get a QFULL, we need
642 		 * to set XS_BUSY as the error.
643 		 */
644 		if (xs->status == SCSI_QUEUE_FULL) {
645 			xs->error = XS_BUSY;
646 		}
647 		if (isp->isp_osinfo.paused) {
648 			isp->isp_osinfo.paused = 0;
649 			isp_prt(isp, ISP_LOGSANCFG|ISP_LOGDEBUG0,
650 			    "THAW QUEUES @ LINE %d", __LINE__);
651 			scsipi_channel_timed_thaw(&isp->isp_chanA);
652 			if (IS_DUALBUS(isp)) {
653 				scsipi_channel_timed_thaw(&isp->isp_chanB);
654 			}
655 		}
656 		if (xs->error == XS_DRIVER_STUFFUP) {
657 			isp_prt(isp, ISP_LOGERR,
658 			    "BOTCHED cmd for %d.%d.%d cmd 0x%x datalen %ld",
659 			    XS_CHANNEL(xs), XS_TGT(xs), XS_LUN(xs),
660 			    XS_CDBP(xs)[0], (long) XS_XFRLEN(xs));
661 		}
662 		scsipi_done(xs);
663 	}
664 }
665 
666 static void
667 isp_dog(void *arg)
668 {
669 	XS_T *xs = arg;
670 	struct ispsoftc *isp = XS_ISP(xs);
671 	uint32_t handle;
672 
673 
674 	ISP_ILOCK(isp);
675 	/*
676 	 * We've decided this command is dead. Make sure we're not trying
677 	 * to kill a command that's already dead by getting its handle
678 	 * and seeing whether it's still alive.
679 	 */
680 	handle = isp_find_handle(isp, xs);
681 	if (handle) {
682 		uint32_t isr;
683 		uint16_t mbox, sema;
684 
685 		if (XS_CMD_DONE_P(xs)) {
686 			isp_prt(isp, ISP_LOGDEBUG1,
687 			    "watchdog found done cmd (handle 0x%x)", handle);
688 			ISP_IUNLOCK(isp);
689 			return;
690 		}
691 
692 		if (XS_CMD_WDOG_P(xs)) {
693 			isp_prt(isp, ISP_LOGDEBUG1,
694 			    "recursive watchdog (handle 0x%x)", handle);
695 			ISP_IUNLOCK(isp);
696 			return;
697 		}
698 
699 		XS_CMD_S_WDOG(xs);
700 
701 		if (ISP_READ_ISR(isp, &isr, &sema, &mbox)) {
702 			isp_intr(isp, isr, sema, mbox);
703 
704 		}
705 		if (XS_CMD_DONE_P(xs)) {
706 			isp_prt(isp, ISP_LOGDEBUG1,
707 			    "watchdog cleanup for handle 0x%x", handle);
708 			XS_CMD_C_WDOG(xs);
709 			isp_done(xs);
710 		} else if (XS_CMD_GRACE_P(xs)) {
711 			isp_prt(isp, ISP_LOGDEBUG1,
712 			    "watchdog timeout for handle 0x%x", handle);
713 			/*
714 			 * Make sure the command is *really* dead before we
715 			 * release the handle (and DMA resources) for reuse.
716 			 */
717 			(void) isp_control(isp, ISPCTL_ABORT_CMD, arg);
718 
719 			/*
720 			 * After this point, the command is really dead.
721 			 */
722 			if (XS_XFRLEN(xs)) {
723 				ISP_DMAFREE(isp, xs, handle);
724 			}
725 			isp_destroy_handle(isp, handle);
726 			XS_SETERR(xs, XS_TIMEOUT);
727 			XS_CMD_S_CLEAR(xs);
728 			isp_done(xs);
729 		} else {
730 			uint32_t nxti, optr;
731 			void *qe;
732 			isp_marker_t local, *mp = &local;
733 			isp_prt(isp, ISP_LOGDEBUG2,
734 			    "possible command timeout on handle %x", handle);
735 			XS_CMD_C_WDOG(xs);
736 			callout_reset(&xs->xs_callout, hz, isp_dog, xs);
737 			if (isp_getrqentry(isp, &nxti, &optr, &qe)) {
738 				ISP_IUNLOCK(isp);
739 				return;
740 			}
741 			XS_CMD_S_GRACE(xs);
742 			MEMZERO((void *) mp, sizeof (*mp));
743 			mp->mrk_header.rqs_entry_count = 1;
744 			mp->mrk_header.rqs_entry_type = RQSTYPE_MARKER;
745 			mp->mrk_modifier = SYNC_ALL;
746 			mp->mrk_target = XS_CHANNEL(xs) << 7;
747 			isp_put_marker(isp, mp, qe);
748 			ISP_ADD_REQUEST(isp, nxti);
749 		}
750 	} else {
751 		isp_prt(isp, ISP_LOGDEBUG0, "watchdog with no command");
752 	}
753 	ISP_IUNLOCK(isp);
754 }
755 
756 /*
757  * Gone Device Timer Function- when we have decided that a device has gone
758  * away, we wait a specific period of time prior to telling the OS it has
759  * gone away.
760  *
761  * This timer function fires once a second and then scans the port database
762  * for devices that are marked dead but still have a virtual target assigned.
763  * We decrement a counter for that port database entry, and when it hits zero,
764  * we tell the OS the device has gone away.
765  */
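/*
 * For example (a sketch, assuming gone_device_time is 30): isp_gdt is
 * rescheduled once a second and decrements new_reserved on each pass, so a
 * zombie entry is finally announced as departed roughly 30 seconds after
 * ISPASYNC_DEV_GONE first marked it.
 */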
766 static void
767 isp_gdt(void *arg)
768 {
769 	ispsoftc_t *isp = arg;
770 	fcportdb_t *lp;
771 	int dbidx, tgt, more_to_do = 0;
772 
773 	isp_prt(isp, ISP_LOGDEBUG0, "GDT timer expired");
774 	ISP_LOCK(isp);
775 	for (dbidx = 0; dbidx < MAX_FC_TARG; dbidx++) {
776 		lp = &FCPARAM(isp)->portdb[dbidx];
777 
778 		if (lp->state != FC_PORTDB_STATE_ZOMBIE) {
779 			continue;
780 		}
781 		if (lp->ini_map_idx == 0) {
782 			continue;
783 		}
784 		if (lp->new_reserved == 0) {
785 			continue;
786 		}
787 		lp->new_reserved -= 1;
788 		if (lp->new_reserved != 0) {
789 			more_to_do++;
790 			continue;
791 		}
792 		tgt = lp->ini_map_idx - 1;
793 		FCPARAM(isp)->isp_ini_map[tgt] = 0;
794 		lp->ini_map_idx = 0;
795 		lp->state = FC_PORTDB_STATE_NIL;
796 		isp_prt(isp, ISP_LOGCONFIG, prom3, lp->portid, tgt,
797 		    "Gone Device Timeout");
798 		isp_make_gone(isp, tgt);
799 	}
800 	if (more_to_do) {
801 		callout_schedule(&isp->isp_osinfo.gdt, hz);
802 	} else {
803 		isp->isp_osinfo.gdt_running = 0;
804 		isp_prt(isp, ISP_LOGSANCFG|ISP_LOGDEBUG0,
805 		    "stopping Gone Device Timer");
806 	}
807 	ISP_UNLOCK(isp);
808 }
809 
810 /*
811  * Loop Down Timer Function- when the loop goes down, a timer is started,
812  * and after it expires we come here, take all probational devices that
813  * the OS knows about, and tell the OS that they've gone away.
814  *
815  * We don't clear the devices out of our port database because, when the loop
816  * comes back up, we have to do some actual cleanup with the chip at that
817  * point (implicit PLOGO, e.g., to get the chip's port database state right).
818  */
819 static void
820 isp_ldt(void *arg)
821 {
822 	ispsoftc_t *isp = arg;
823 	fcportdb_t *lp;
824 	int dbidx, tgt;
825 
826 	isp_prt(isp, ISP_LOGSANCFG|ISP_LOGDEBUG0, "Loop Down Timer expired");
827 	ISP_LOCK(isp);
828 
829 	/*
830 	 * Notify the OS of all targets that we now consider to have departed.
831 	 */
832 	for (dbidx = 0; dbidx < MAX_FC_TARG; dbidx++) {
833 		lp = &FCPARAM(isp)->portdb[dbidx];
834 
835 		if (lp->state != FC_PORTDB_STATE_PROBATIONAL) {
836 			continue;
837 		}
838 		if (lp->ini_map_idx == 0) {
839 			continue;
840 		}
841 
842 		/*
843 		 * XXX: CLEAN UP AND COMPLETE ANY PENDING COMMANDS FIRST!
844 		 */
845 
846 		/*
847 		 * Mark that we've announced that this device is gone....
848 		 */
849 		lp->reserved = 1;
850 
851 		/*
852 		 * but *don't* change the state of the entry. Just clear
853 		 * any target id stuff and announce to the midlayer that the
854 		 * device is gone. This way any necessary PLOGO stuff
855 		 * will happen when loop comes back up.
856 		 */
857 
858 		tgt = lp->ini_map_idx - 1;
859 		FCPARAM(isp)->isp_ini_map[tgt] = 0;
860 		lp->ini_map_idx = 0;
861 		isp_prt(isp, ISP_LOGCONFIG, prom3, lp->portid, tgt,
862 		    "Loop Down Timeout");
863 		isp_make_gone(isp, tgt);
864 	}
865 
866 	/*
867 	 * The loop down timer has expired. Wake up the kthread
868 	 * to notice that fact (or make it false).
869 	 */
870 	isp->isp_osinfo.loop_down_time = isp->isp_osinfo.loop_down_limit+1;
871 	wakeup(&isp->isp_osinfo.thread);
872 	ISP_UNLOCK(isp);
873 }
874 
875 static void
876 isp_make_here(ispsoftc_t *isp, int tgt)
877 {
878 	isp_prt(isp, ISP_LOGINFO, "target %d has arrived", tgt);
879 }
880 
881 static void
882 isp_make_gone(ispsoftc_t *isp, int tgt)
883 {
884 	isp_prt(isp, ISP_LOGINFO, "target %d has departed", tgt);
885 }
886 
887 static void
888 isp_fc_worker(void *arg)
889 {
890 	void scsipi_run_queue(struct scsipi_channel *);
891 	ispsoftc_t *isp = arg;
892 	int slp = 0;
893 	int s = splbio();
894 	/*
895 	 * This first loop covers the case where we have yet to get
896 	 * good Fibre Channel state.
897 	 */
898 	while (isp->isp_osinfo.thread != NULL) {
899 		int sok, lb, lim;
900 
901 		isp_prt(isp, ISP_LOGSANCFG|ISP_LOGDEBUG0, "checking FC state");
902 		sok = isp->isp_osinfo.mbox_sleep_ok;
903 		isp->isp_osinfo.mbox_sleep_ok = 1;
904 		lb = isp_fc_runstate(isp, 250000);
905 		isp->isp_osinfo.mbox_sleep_ok = sok;
906 		if (lb) {
907 			/*
908 			 * Increment loop down time by the last sleep interval
909 			 */
910 			isp->isp_osinfo.loop_down_time += slp;
911 
912 			if (lb < 0) {
913 				isp_prt(isp, ISP_LOGSANCFG|ISP_LOGDEBUG0,
914 				    "FC loop not up (down count %d)",
915 				    isp->isp_osinfo.loop_down_time);
916 			} else {
917 				isp_prt(isp, ISP_LOGSANCFG|ISP_LOGDEBUG0,
918 				    "FC got to %d (down count %d)",
919 				    lb, isp->isp_osinfo.loop_down_time);
920 			}
921 
922 
923 			/*
924 			 * If we've never seen loop up and we've waited longer
925 			 * than quickboot time, or we've seen loop up but we've
926 			 * waited longer than loop_down_limit, give up and go
927 			 * to sleep until loop comes up.
928 			 */
929 			if (FCPARAM(isp)->loop_seen_once == 0) {
930 				lim = isp_quickboot_time;
931 			} else {
932 				lim = isp->isp_osinfo.loop_down_limit;
933 			}
934 			if (isp->isp_osinfo.loop_down_time >= lim) {
935 				/*
936 				 * If we're now past our limit, release
937 				 * the queues and let them come in and
938 				 * either get HBA_SELTIMEOUT or cause
939 				 * another freeze.
940 				 */
941 				isp->isp_osinfo.blocked = 1;
942 				slp = 0;
943 			} else if (isp->isp_osinfo.loop_down_time < 10) {
944 				slp = 1;
945 			} else if (isp->isp_osinfo.loop_down_time < 30) {
946 				slp = 5;
947 			} else if (isp->isp_osinfo.loop_down_time < 60) {
948 				slp = 10;
949 			} else if (isp->isp_osinfo.loop_down_time < 120) {
950 				slp = 20;
951 			} else {
952 				slp = 30;
953 			}
954 
955 		} else {
956 			isp_prt(isp, ISP_LOGSANCFG|ISP_LOGDEBUG0,
957 			    "FC state OK");
958 			isp->isp_osinfo.loop_down_time = 0;
959 			slp = 0;
960 			isp_prt(isp, ISP_LOGSANCFG|ISP_LOGDEBUG0,
961 			    "THAW QUEUES @ LINE %d", __LINE__);
962 			scsipi_channel_thaw(&isp->isp_chanA, 1);
963 		}
964 
965 		/*
966 		 * If we'd frozen the queues, unfreeze them now so that
967 		 * we can start getting commands. If the FC state isn't
968 		 * okay yet, they'll hit that in isp_start which will
969 		 * freeze the queues again.
970 		 */
971 		if (isp->isp_osinfo.blocked) {
972 			isp->isp_osinfo.blocked = 0;
973 			isp_prt(isp, ISP_LOGSANCFG|ISP_LOGDEBUG0,
974 			    "THAW QUEUES @ LINE %d", __LINE__);
975 			scsipi_channel_thaw(&isp->isp_chanA, 1);
976 		}
977 		isp_prt(isp, ISP_LOGSANCFG|ISP_LOGDEBUG0, "sleep time %d", slp);
978 		tsleep(&isp->isp_osinfo.thread, PRIBIO, "ispf", slp * hz);
979 
980 		/*
981 		 * If slp is zero, we're waking up for the first time after
982 		 * things have been okay. In this case, we set a deferral state
983 		 * for all commands and delay hysteresis seconds before starting
984 		 * the FC state evaluation. This gives the loop/fabric a chance
985 		 * to settle.
986 		 */
987 		if (slp == 0 && isp_fabric_hysteresis) {
988 			isp_prt(isp, ISP_LOGSANCFG|ISP_LOGDEBUG0,
989 			    "sleep hysteresis tick time %d",
990 			    isp_fabric_hysteresis * hz);
991 			(void) tsleep(&isp_fabric_hysteresis, PRIBIO, "ispT",
992 			    (isp_fabric_hysteresis * hz));
993 		}
994 	}
995 	splx(s);
996 
997 	/* In case parent is waiting for us to exit. */
998 	wakeup(&isp->isp_osinfo.thread);
999 	kthread_exit(0);
1000 }
1001 
1002 /*
1003  * Free any associated resources prior to decommissioning and
1004  * set the card to a known state (so it doesn't wake up and kick
1005  * us when we aren't expecting it to).
1006  *
1007  * Locks are held before coming here.
1008  */
1009 void
1010 isp_uninit(struct ispsoftc *isp)
1011 {
1012 	isp_lock(isp);
1013 	/*
1014 	 * Leave with interrupts disabled.
1015 	 */
1016 	ISP_DISABLE_INTS(isp);
1017 	isp_unlock(isp);
1018 }
1019 
1020 int
1021 isp_async(struct ispsoftc *isp, ispasync_t cmd, void *arg)
1022 {
1023 	int bus, tgt;
1024 	const char *msg = NULL;
1025 	static const char prom[] =
1026 	    "PortID 0x%06x handle 0x%x role %s %s\n"
1027 	    "      WWNN 0x%08x%08x WWPN 0x%08x%08x";
1028 	static const char prom2[] =
1029 	    "PortID 0x%06x handle 0x%x role %s %s tgt %u\n"
1030 	    "      WWNN 0x%08x%08x WWPN 0x%08x%08x";
1031 	fcportdb_t *lp;
1032 
1033 	switch (cmd) {
1034 	case ISPASYNC_NEW_TGT_PARAMS:
1035 	if (IS_SCSI(isp) && isp->isp_dblev) {
1036 		sdparam *sdp = isp->isp_param;
1037 		int flags;
1038 		struct scsipi_xfer_mode xm;
1039 
1040 		tgt = *((int *) arg);
1041 		bus = (tgt >> 16) & 0xffff;
1042 		tgt &= 0xffff;
1043 		sdp += bus;
1044 		flags = sdp->isp_devparam[tgt].actv_flags;
1045 
1046 		xm.xm_mode = 0;
1047 		xm.xm_period = sdp->isp_devparam[tgt].actv_period;
1048 		xm.xm_offset = sdp->isp_devparam[tgt].actv_offset;
1049 		xm.xm_target = tgt;
1050 
1051 		if ((flags & DPARM_SYNC) && xm.xm_period && xm.xm_offset)
1052 			xm.xm_mode |= PERIPH_CAP_SYNC;
1053 		if (flags & DPARM_WIDE)
1054 			xm.xm_mode |= PERIPH_CAP_WIDE16;
1055 		if (flags & DPARM_TQING)
1056 			xm.xm_mode |= PERIPH_CAP_TQING;
1057 		scsipi_async_event(bus? &isp->isp_chanB : &isp->isp_chanA,
1058 		    ASYNC_EVENT_XFER_MODE, &xm);
1059 		break;
1060 	}
1061 	case ISPASYNC_BUS_RESET:
1062 		bus = *((int *) arg);
1063 		scsipi_async_event(bus? &isp->isp_chanB : &isp->isp_chanA,
1064 		    ASYNC_EVENT_RESET, NULL);
1065 		isp_prt(isp, ISP_LOGINFO, "SCSI bus %d reset detected", bus);
1066 		break;
1067 	case ISPASYNC_LIP:
1068 		if (msg == NULL) {
1069 			msg = "LIP Received";
1070 		}
1071 		/* FALLTHROUGH */
1072 	case ISPASYNC_LOOP_RESET:
1073 		if (msg == NULL) {
1074 			msg = "LOOP Reset Received";
1075 		}
1076 		/* FALLTHROUGH */
1077 	case ISPASYNC_LOOP_DOWN:
1078 		if (msg == NULL) {
1079 			msg = "Loop DOWN";
1080 		}
1081 		/*
1082 		 * Don't do queue freezes or blockage until we have the
1083 		 * thread running that can unfreeze/unblock us.
1084 		 */
1085 		if (isp->isp_osinfo.blocked == 0) {
1086 			if (isp->isp_osinfo.thread) {
1087 				isp->isp_osinfo.blocked = 1;
1088 				isp_prt(isp, ISP_LOGSANCFG|ISP_LOGDEBUG0,
1089 				    "FREEZE QUEUES @ LINE %d", __LINE__);
1090 				scsipi_channel_freeze(&isp->isp_chanA, 1);
1091 			}
1092 		}
1093 		isp_prt(isp, ISP_LOGINFO, msg);
1094 		break;
1095         case ISPASYNC_LOOP_UP:
1096 		/*
1097 		 * Let the subsequent ISPASYNC_CHANGE_NOTIFY invoke
1098 		 * the FC worker thread. When the FC worker thread
1099 		 * is done, let *it* call scsipi_channel_thaw...
1100 		 */
1101 		isp_prt(isp, ISP_LOGINFO, "Loop UP");
1102 		break;
1103 	case ISPASYNC_DEV_ARRIVED:
1104 		lp = arg;
1105 		lp->reserved = 0;
1106 		if ((isp->isp_role & ISP_ROLE_INITIATOR) &&
1107 		    (lp->roles & (SVC3_TGT_ROLE >> SVC3_ROLE_SHIFT))) {
1108 			int dbidx = lp - FCPARAM(isp)->portdb;
1109 			int i;
1110 
1111 			for (i = 0; i < MAX_FC_TARG; i++) {
1112 				if (i >= FL_ID && i <= SNS_ID) {
1113 					continue;
1114 				}
1115 				if (FCPARAM(isp)->isp_ini_map[i] == 0) {
1116 					break;
1117 				}
1118 			}
1119 			if (i < MAX_FC_TARG) {
1120 				FCPARAM(isp)->isp_ini_map[i] = dbidx + 1;
1121 				lp->ini_map_idx = i + 1;
1122 			} else {
1123 				isp_prt(isp, ISP_LOGWARN, "out of target ids");
1124 				isp_dump_portdb(isp);
1125 			}
1126 		}
1127 		if (lp->ini_map_idx) {
1128 			tgt = lp->ini_map_idx - 1;
1129 			isp_prt(isp, ISP_LOGCONFIG, prom2,
1130 			    lp->portid, lp->handle,
1131 		            roles[lp->roles], "arrived at", tgt,
1132 		    	    (uint32_t) (lp->node_wwn >> 32),
1133 			    (uint32_t) lp->node_wwn,
1134 		    	    (uint32_t) (lp->port_wwn >> 32),
1135 			    (uint32_t) lp->port_wwn);
1136 			isp_make_here(isp, tgt);
1137 		} else {
1138 			isp_prt(isp, ISP_LOGCONFIG, prom,
1139 			    lp->portid, lp->handle,
1140 		            roles[lp->roles], "arrived",
1141 		    	    (uint32_t) (lp->node_wwn >> 32),
1142 			    (uint32_t) lp->node_wwn,
1143 		    	    (uint32_t) (lp->port_wwn >> 32),
1144 			    (uint32_t) lp->port_wwn);
1145 		}
1146 		break;
1147 	case ISPASYNC_DEV_CHANGED:
1148 		lp = arg;
1149 		if (isp_change_is_bad) {
1150 			lp->state = FC_PORTDB_STATE_NIL;
1151 			if (lp->ini_map_idx) {
1152 				tgt = lp->ini_map_idx - 1;
1153 				FCPARAM(isp)->isp_ini_map[tgt] = 0;
1154 				lp->ini_map_idx = 0;
1155 				isp_prt(isp, ISP_LOGCONFIG, prom3,
1156 				    lp->portid, tgt, "change is bad");
1157 				isp_make_gone(isp, tgt);
1158 			} else {
1159 				isp_prt(isp, ISP_LOGCONFIG, prom,
1160 				    lp->portid, lp->handle,
1161 				    roles[lp->roles],
1162 				    "changed and departed",
1163 				    (uint32_t) (lp->node_wwn >> 32),
1164 				    (uint32_t) lp->node_wwn,
1165 				    (uint32_t) (lp->port_wwn >> 32),
1166 				    (uint32_t) lp->port_wwn);
1167 			}
1168 		} else {
1169 			lp->portid = lp->new_portid;
1170 			lp->roles = lp->new_roles;
1171 			if (lp->ini_map_idx) {
1172 				int t = lp->ini_map_idx - 1;
1173 				FCPARAM(isp)->isp_ini_map[t] =
1174 				    (lp - FCPARAM(isp)->portdb) + 1;
1175 				tgt = lp->ini_map_idx - 1;
1176 				isp_prt(isp, ISP_LOGCONFIG, prom2,
1177 				    lp->portid, lp->handle,
1178 				    roles[lp->roles], "changed at", tgt,
1179 				    (uint32_t) (lp->node_wwn >> 32),
1180 				    (uint32_t) lp->node_wwn,
1181 				    (uint32_t) (lp->port_wwn >> 32),
1182 				    (uint32_t) lp->port_wwn);
1183 			} else {
1184 				isp_prt(isp, ISP_LOGCONFIG, prom,
1185 				    lp->portid, lp->handle,
1186 				    roles[lp->roles], "changed",
1187 				    (uint32_t) (lp->node_wwn >> 32),
1188 				    (uint32_t) lp->node_wwn,
1189 				    (uint32_t) (lp->port_wwn >> 32),
1190 				    (uint32_t) lp->port_wwn);
1191 			}
1192 		}
1193 		break;
1194 	case ISPASYNC_DEV_STAYED:
1195 		lp = arg;
1196 		if (lp->ini_map_idx) {
1197 			tgt = lp->ini_map_idx - 1;
1198 			isp_prt(isp, ISP_LOGCONFIG, prom2,
1199 			    lp->portid, lp->handle,
1200 		    	    roles[lp->roles], "stayed at", tgt,
1201 			    (uint32_t) (lp->node_wwn >> 32),
1202 			    (uint32_t) lp->node_wwn,
1203 		    	    (uint32_t) (lp->port_wwn >> 32),
1204 			    (uint32_t) lp->port_wwn);
1205 		} else {
1206 			isp_prt(isp, ISP_LOGCONFIG, prom,
1207 			    lp->portid, lp->handle,
1208 		    	    roles[lp->roles], "stayed",
1209 			    (uint32_t) (lp->node_wwn >> 32),
1210 			    (uint32_t) lp->node_wwn,
1211 		    	    (uint32_t) (lp->port_wwn >> 32),
1212 			    (uint32_t) lp->port_wwn);
1213 		}
1214 		break;
1215 	case ISPASYNC_DEV_GONE:
1216 		lp = arg;
1217 		/*
1218 		 * If this entry has a virtual target and we haven't already
1219 		 * marked it as one that isp_gdt will tell the OS about,
1220 		 * set the isp_gdt timer running on it.
1221 		 *
1222 		 * If it isn't marked for isp_gdt to get rid of, announce
1223 		 * right away that it's gone.
1224 		 */
1225 		if (lp->ini_map_idx && lp->reserved == 0) {
1226 			lp->reserved = 1;
1227 			lp->new_reserved = isp->isp_osinfo.gone_device_time;
1228 			lp->state = FC_PORTDB_STATE_ZOMBIE;
1229 			if (isp->isp_osinfo.gdt_running == 0) {
1230 				isp->isp_osinfo.gdt_running = 1;
1231 				isp_prt(isp, ISP_LOGSANCFG|ISP_LOGDEBUG0,
1232 				    "starting Gone Device Timer");
1233 				callout_schedule(&isp->isp_osinfo.gdt, hz);
1234 			}
1235 			tgt = lp->ini_map_idx - 1;
1236 			isp_prt(isp, ISP_LOGCONFIG, prom2,
1237 			    lp->portid, lp->handle,
1238 		            roles[lp->roles], "gone zombie at", tgt,
1239 		    	    (uint32_t) (lp->node_wwn >> 32),
1240 			    (uint32_t) lp->node_wwn,
1241 		    	    (uint32_t) (lp->port_wwn >> 32),
1242 			    (uint32_t) lp->port_wwn);
1243 		} else if (lp->reserved == 0) {
1244 			isp_prt(isp, ISP_LOGCONFIG, prom,
1245 			    lp->portid, lp->handle,
1246 			    roles[lp->roles], "departed",
1247 			    (uint32_t) (lp->node_wwn >> 32),
1248 			    (uint32_t) lp->node_wwn,
1249 			    (uint32_t) (lp->port_wwn >> 32),
1250 			    (uint32_t) lp->port_wwn);
1251 		}
1252 		break;
1253 	case ISPASYNC_CHANGE_NOTIFY:
1254 	{
1255 		if (arg == ISPASYNC_CHANGE_PDB) {
1256 			msg = "Port Database Changed";
1257 		} else if (arg == ISPASYNC_CHANGE_SNS) {
1258 			msg = "Name Server Database Changed";
1259 		} else {
1260 			msg = "Other Change Notify";
1261 		}
1262 		/*
1263 		 * If the loop down timer is running, cancel it.
1264 		 */
1265 		if (callout_active(&isp->isp_osinfo.ldt)) {
1266 			isp_prt(isp, ISP_LOGSANCFG|ISP_LOGDEBUG0,
1267 			   "Stopping Loop Down Timer");
1268 			callout_stop(&isp->isp_osinfo.ldt);
1269 		}
1270 		isp_prt(isp, ISP_LOGINFO, msg);
1271 		/*
1272 		 * We can set blocked here because we know it's now okay
1273 		 * to try and run isp_fc_runstate (in order to build loop
1274 		 * state). But we don't try and freeze the midlayer's queue
1275 		 * if we have no thread that we can wake to later unfreeze
1276 		 * it.
1277 		 */
1278 		if (isp->isp_osinfo.blocked == 0) {
1279 			isp->isp_osinfo.blocked = 1;
1280 			if (isp->isp_osinfo.thread) {
1281 				isp_prt(isp, ISP_LOGSANCFG|ISP_LOGDEBUG0,
1282 				    "FREEZE QUEUES @ LINE %d", __LINE__);
1283 				scsipi_channel_freeze(&isp->isp_chanA, 1);
1284 			}
1285 		}
1286 		/*
1287 		 * Note that we have work for the thread to do, and
1288 		 * if the thread is here already, wake it up.
1289 		 */
1290 		if (isp->isp_osinfo.thread) {
1291 			wakeup(&isp->isp_osinfo.thread);
1292 		} else {
1293 			isp_prt(isp, ISP_LOGDEBUG1, "no FC thread yet");
1294 		}
1295 		break;
1296 	}
1297 	case ISPASYNC_FW_CRASH:
1298 	{
1299 		uint16_t mbox1, mbox6;
1300 		mbox1 = ISP_READ(isp, OUTMAILBOX1);
1301 		if (IS_DUALBUS(isp)) {
1302 			mbox6 = ISP_READ(isp, OUTMAILBOX6);
1303 		} else {
1304 			mbox6 = 0;
1305 		}
1306                 isp_prt(isp, ISP_LOGERR,
1307                     "Internal Firmware Error on bus %d @ RISC Address 0x%x",
1308                     mbox6, mbox1);
1309 		if (IS_FC(isp)) {
1310 			if (isp->isp_osinfo.blocked == 0) {
1311 				isp->isp_osinfo.blocked = 1;
1312 				isp_prt(isp, ISP_LOGSANCFG|ISP_LOGDEBUG0,
1313 				    "FREEZE QUEUES @ LINE %d", __LINE__);
1314 				scsipi_channel_freeze(&isp->isp_chanA, 1);
1315 			}
1316 #ifdef	ISP_FW_CRASH_DUMP
1317 			isp_fw_dump(isp);
1318 #endif
1319 		}
1320 		mbox1 = isp->isp_osinfo.mbox_sleep_ok;
1321 		isp->isp_osinfo.mbox_sleep_ok = 0;
1322 		isp_reinit(isp);
1323 		isp->isp_osinfo.mbox_sleep_ok = mbox1;
1324 		isp_async(isp, ISPASYNC_FW_RESTARTED, NULL);
1325 		break;
1326 	}
1327 	default:
1328 		break;
1329 	}
1330 	return (0);
1331 }
1332 
1333 #include <machine/stdarg.h>
1334 void
1335 isp_prt(struct ispsoftc *isp, int level, const char *fmt, ...)
1336 {
1337 	va_list ap;
1338 	if (level != ISP_LOGALL && (level & isp->isp_dblev) == 0) {
1339 		return;
1340 	}
1341 	printf("%s: ", isp->isp_name);
1342 	va_start(ap, fmt);
1343 	vprintf(fmt, ap);
1344 	va_end(ap);
1345 	printf("\n");
1346 }
1347 
1348 void
1349 isp_lock(struct ispsoftc *isp)
1350 {
1351 	int s = splbio();
1352 	if (isp->isp_osinfo.islocked++ == 0) {
1353 		isp->isp_osinfo.splsaved = s;
1354 	} else {
1355 		splx(s);
1356 	}
1357 }
1358 
1359 void
1360 isp_unlock(struct ispsoftc *isp)
1361 {
1362 	if (isp->isp_osinfo.islocked-- <= 1) {
1363 		isp->isp_osinfo.islocked = 0;
1364 		splx(isp->isp_osinfo.splsaved);
1365 	}
1366 }
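/*
 * Usage sketch: isp_lock()/isp_unlock() nest, and only the outermost pair
 * changes the processor priority level.
 *
 *	isp_lock(isp);		splbio(); pre-lock spl saved in splsaved
 *	isp_lock(isp);		nested; spl already at splbio, left there
 *	isp_unlock(isp);	nested; no splx() yet
 *	isp_unlock(isp);	outermost; splx(splsaved) restores old spl
 */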
1367 
1368 uint64_t
1369 isp_microtime_sub(struct timeval *b, struct timeval *a)
1370 {
1371 	struct timeval x;
1372 	uint64_t elapsed;
1373 	timersub(b, a, &x);
1374 	elapsed = GET_NANOSEC(&x);
1375 	if (elapsed == 0)
1376 		elapsed++;
1377 	return (elapsed);
1378 }
1379 
1380 int
1381 isp_mbox_acquire(ispsoftc_t *isp)
1382 {
1383 	if (isp->isp_osinfo.mboxbsy) {
1384 		return (1);
1385 	} else {
1386 		isp->isp_osinfo.mboxcmd_done = 0;
1387 		isp->isp_osinfo.mboxbsy = 1;
1388 		return (0);
1389 	}
1390 }
1391 
1392 void
1393 isp_mbox_wait_complete(struct ispsoftc *isp, mbreg_t *mbp)
1394 {
1395 	unsigned int usecs = mbp->timeout;
1396 	unsigned int maxc, olim, ilim;
1397 	struct timeval start;
1398 
1399 	if (usecs == 0) {
1400 		usecs = MBCMD_DEFAULT_TIMEOUT;
1401 	}
1402 	maxc = isp->isp_mbxwrk0 + 1;
1403 
1404 	microtime(&start);
1405 	if (isp->isp_osinfo.mbox_sleep_ok) {
1406 		int to;
1407 		struct timeval tv;
1408 
1409 		tv.tv_sec = 0;
1410 		tv.tv_usec = 0;
1411 		for (olim = 0; olim < maxc; olim++) {
1412 			tv.tv_sec += (usecs / 1000000);
1413 			tv.tv_usec += (usecs % 1000000);
1414 			if (tv.tv_usec >= 1000000) {
1415 				tv.tv_sec++;
1416 				tv.tv_usec -= 1000000;
1417 			}
1418 		}
1419 		timeradd(&tv, &start, &tv);
1420 		to = hzto(&tv);
1421 		if (to == 0)
1422 			to = 1;
1423 
1424 		isp->isp_osinfo.mbox_sleep_ok = 0;
1425 		isp->isp_osinfo.mbox_sleeping = 1;
1426 		tsleep(&isp->isp_mbxworkp, PRIBIO, "ispmbx_sleep", to);
1427 		isp->isp_osinfo.mbox_sleeping = 0;
1428 		isp->isp_osinfo.mbox_sleep_ok = 1;
1429 	} else {
1430 		for (olim = 0; olim < maxc; olim++) {
1431 			for (ilim = 0; ilim < usecs; ilim += 100) {
1432 				uint32_t isr;
1433 				uint16_t sema, mbox;
1434 				if (isp->isp_osinfo.mboxcmd_done) {
1435 					break;
1436 				}
1437 				if (ISP_READ_ISR(isp, &isr, &sema, &mbox)) {
1438 					isp_intr(isp, isr, sema, mbox);
1439 					if (isp->isp_osinfo.mboxcmd_done) {
1440 						break;
1441 					}
1442 				}
1443 				USEC_DELAY(100);
1444 			}
1445 			if (isp->isp_osinfo.mboxcmd_done) {
1446 				break;
1447 			}
1448 		}
1449 	}
1450 	if (isp->isp_osinfo.mboxcmd_done == 0) {
1451 		struct timeval finish, elapsed;
1452 
1453 		microtime(&finish);
1454 		timersub(&finish, &start, &elapsed);
1455 		isp_prt(isp, ISP_LOGWARN,
1456 		    "%s Mailbox Command (0x%x) Timeout (%uus actual)",
1457 		    isp->isp_osinfo.mbox_sleep_ok? "Interrupting" : "Polled",
1458 		    isp->isp_lastmbxcmd, (elapsed.tv_sec * 1000000) +
1459 		    elapsed.tv_usec);
1460 		mbp->param[0] = MBOX_TIMEOUT;
1461 		isp->isp_osinfo.mboxcmd_done = 1;
1462 	}
1463 }
1464 
1465 void
1466 isp_mbox_notify_done(ispsoftc_t *isp)
1467 {
1468 	if (isp->isp_osinfo.mbox_sleeping) {
1469 		wakeup(&isp->isp_mbxworkp);
1470 	}
1471 	isp->isp_osinfo.mboxcmd_done = 1;
1472 }
1473 
1474 void
1475 isp_mbox_release(ispsoftc_t *isp)
1476 {
1477 	isp->isp_osinfo.mboxbsy = 0;
1478 }
1479