xref: /netbsd-src/sys/dev/ic/isp_netbsd.c (revision deb6f0161a9109e7de9b519dc8dfb9478668dcdd)
1 /* $NetBSD: isp_netbsd.c,v 1.90 2018/09/03 16:29:31 riastradh Exp $ */
2 /*
3  * Platform (NetBSD) dependent common attachment code for Qlogic adapters.
4  */
5 /*
6  * Copyright (C) 1997, 1998, 1999 National Aeronautics & Space Administration
7  * All rights reserved.
8  *
9  * Additional Copyright (C) 2000-2007 by Matthew Jacob
10  * All rights reserved.
11  *
12  * Redistribution and use in source and binary forms, with or without
13  * modification, are permitted provided that the following conditions
14  * are met:
15  * 1. Redistributions of source code must retain the above copyright
16  *    notice, this list of conditions and the following disclaimer.
17  * 2. Redistributions in binary form must reproduce the above copyright
18  *    notice, this list of conditions and the following disclaimer in the
19  *    documentation and/or other materials provided with the distribution.
20  * 3. The name of the author may not be used to endorse or promote products
21  *    derived from this software without specific prior written permission
22  *
23  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
24  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
25  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
26  * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
27  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
28  * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
29  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
30  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
31  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
32  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
33  */
34 
35 #include <sys/cdefs.h>
36 __KERNEL_RCSID(0, "$NetBSD: isp_netbsd.c,v 1.90 2018/09/03 16:29:31 riastradh Exp $");
37 
38 #include <dev/ic/isp_netbsd.h>
39 #include <dev/ic/isp_ioctl.h>
40 #include <sys/scsiio.h>
41 
42 #include <sys/timevar.h>
43 
44 /*
45  * Set a timeout for the watchdogging of a command.
46  *
47  * The dimensional analysis is
48  *
49  *	milliseconds * (seconds/millisecond) * (ticks/second) = ticks
50  *
51  *			=
52  *
53  *	(milliseconds / 1000) * hz = ticks
54  *
55  *
56  * For timeouts less than 1 second, we'll get zero. Because of this, and
57  * because we want to establish *our* timeout to be longer than what the
58  * firmware might do, we just add 3 seconds at the back end.
59  */
60 #define	_XT(xs)	((((xs)->timeout/1000) * hz) + (3 * hz))
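/*
 * A worked example of the macro above (illustrative; assumes hz == 100,
 * a common but not universal tick rate): a 30000ms command timeout gives
 *
 *	(30000 / 1000) * 100 + (3 * 100) = 3300 ticks (33 seconds),
 *
 * while a 500ms timeout truncates to zero and is left with only the
 * 3 * hz = 300 tick (3 second) backstop.
 */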
61 
62 static void isp_config_interrupts(device_t);
63 static void ispminphys_1020(struct buf *);
64 static void ispminphys(struct buf *);
65 static void ispcmd(struct ispsoftc *, XS_T *);
66 static void isprequest(struct scsipi_channel *, scsipi_adapter_req_t, void *);
67 static int
68 ispioctl(struct scsipi_channel *, u_long, void *, int, struct proc *);
69 
70 static void isp_polled_cmd_wait(struct ispsoftc *, XS_T *);
71 static void isp_dog(void *);
72 static void isp_gdt(void *);
73 static void isp_ldt(void *);
74 static void isp_make_here(ispsoftc_t *, int);
75 static void isp_make_gone(ispsoftc_t *, int);
76 static void isp_fc_worker(void *);
77 
78 static const char *roles[4] = {
79     "(none)", "Target", "Initiator", "Target/Initiator"
80 };
81 static const char prom3[] =
82     "PortID 0x%06x Departed from Target %u because of %s";
83 int isp_change_is_bad = 0;	/* "changed" devices are bad */
84 int isp_quickboot_time = 15;	/* don't wait more than N secs for loop up */
85 static int isp_fabric_hysteresis = 5;
86 #define	isp_change_is_bad	0
87 
88 /*
89  * Complete attachment of hardware, including subdevices.
90  */
91 
92 void
93 isp_attach(struct ispsoftc *isp)
94 {
95 	device_t self = isp->isp_osinfo.dev;
96 	int i;
97 
98 	isp->isp_state = ISP_RUNSTATE;
99 
100 	isp->isp_osinfo.adapter.adapt_dev = self;
	isp->isp_osinfo.adapter.adapt_nchannels = isp->isp_nchan;
101 	isp->isp_osinfo.adapter.adapt_openings = isp->isp_maxcmds;
102 	isp->isp_osinfo.loop_down_limit = 300;
103 
104 	/*
105 	 * It's not stated whether max_periph is limited by SPI
106 	 * tag usage, but let's assume that it is.
107 	 */
108 	isp->isp_osinfo.adapter.adapt_max_periph = uimin(isp->isp_maxcmds, 255);
109 	isp->isp_osinfo.adapter.adapt_ioctl = ispioctl;
110 	isp->isp_osinfo.adapter.adapt_request = isprequest;
111 	if (isp->isp_type <= ISP_HA_SCSI_1020A) {
112 		isp->isp_osinfo.adapter.adapt_minphys = ispminphys_1020;
113 	} else {
114 		isp->isp_osinfo.adapter.adapt_minphys = ispminphys;
115 	}
116 
117 	callout_init(&isp->isp_osinfo.gdt, 0);
118 	callout_setfunc(&isp->isp_osinfo.gdt, isp_gdt, isp);
119 	callout_init(&isp->isp_osinfo.ldt, 0);
120 	callout_setfunc(&isp->isp_osinfo.ldt, isp_ldt, isp);
121 	if (IS_FC(isp)) {
122 		if (kthread_create(PRI_NONE, 0, NULL, isp_fc_worker, isp,
123 		    &isp->isp_osinfo.thread, "%s:fc_thrd",
124 		    device_xname(self))) {
125 			isp_prt(isp, ISP_LOGERR,
126 			    "unable to create FC worker thread");
127 			return;
128 		}
129 	}
130 
131 	for (i = 0; i != isp->isp_osinfo.adapter.adapt_nchannels; i++) {
132 		isp->isp_osinfo.chan[i].chan_adapter =
133 		    &isp->isp_osinfo.adapter;
134 		isp->isp_osinfo.chan[i].chan_bustype = &scsi_bustype;
135 		isp->isp_osinfo.chan[i].chan_channel = i;
136 		/*
137 		 * Until the midlayer is fixed to use REPORT LUNS,
138 		 * limit to 8 luns.
139 		 */
140 		isp->isp_osinfo.chan[i].chan_nluns = uimin(isp->isp_maxluns, 8);
141 		if (IS_FC(isp)) {
142 			isp->isp_osinfo.chan[i].chan_ntargets = MAX_FC_TARG;
143 			if (ISP_CAP_2KLOGIN(isp) == 0 && MAX_FC_TARG > 256) {
144 				isp->isp_osinfo.chan[i].chan_ntargets = 256;
145 			}
146 			isp->isp_osinfo.chan[i].chan_id = MAX_FC_TARG;
147 		} else {
148 			isp->isp_osinfo.chan[i].chan_ntargets = MAX_TARGETS;
149 			isp->isp_osinfo.chan[i].chan_id =
150 			    SDPARAM(isp, i)->isp_initiator_id;
151 			ISP_LOCK(isp);
152 			(void) isp_control(isp, ISPCTL_RESET_BUS, i);
153 			ISP_UNLOCK(isp);
154 		}
155 	}
156 
157 	/*
158 	 * Defer enabling mailbox interrupts until later.
159 	 */
160 	config_interrupts(self, isp_config_interrupts);
161 }
162 
163 static void
164 isp_config_interrupts(device_t self)
165 {
166 	int i;
167 	struct ispsoftc *isp = device_private(self);
168 
169 	isp->isp_osinfo.mbox_sleep_ok = 1;
170 
171 	if (IS_FC(isp) && (FCPARAM(isp, 0)->isp_fwstate != FW_READY ||
172 	    FCPARAM(isp, 0)->isp_loopstate != LOOP_READY)) {
173 		isp_prt(isp, ISP_LOGSANCFG|ISP_LOGDEBUG0,
174 		   "Starting Initial Loop Down Timer");
175 		callout_schedule(&isp->isp_osinfo.ldt, isp_quickboot_time * hz);
176 	}
177 
178 	/*
179 	 * And attach children (if any).
180 	 */
181 	for (i = 0; i < isp->isp_osinfo.adapter.adapt_nchannels; i++) {
182 		config_found(self, &isp->isp_osinfo.chan[i], scsiprint);
183 	}
184 }
185 
186 /*
187  * minphys our xfers
188  */
189 static void
190 ispminphys_1020(struct buf *bp)
191 {
192 	if (bp->b_bcount >= (1 << 24)) {
193 		bp->b_bcount = (1 << 24);
194 	}
195 	minphys(bp);
196 }
197 
198 static void
199 ispminphys(struct buf *bp)
200 {
201 	if (bp->b_bcount >= (1 << 30)) {
202 		bp->b_bcount = (1 << 30);
203 	}
204 	minphys(bp);
205 }
206 
207 static int
208 ispioctl(struct scsipi_channel *chan, u_long cmd, void *addr, int flag,
209 	struct proc *p)
210 {
211 	struct ispsoftc *isp = device_private(chan->chan_adapter->adapt_dev);
212 	int nr, bus, retval = ENOTTY;
213 
214 	switch (cmd) {
215 	case ISP_SDBLEV:
216 	{
217 		int olddblev = isp->isp_dblev;
218 		isp->isp_dblev = *(int *)addr;
219 		*(int *)addr = olddblev;
220 		retval = 0;
221 		break;
222 	}
223 	case ISP_GETROLE:
224 		bus = *(int *)addr;
225 		if (bus < 0 || bus >= isp->isp_nchan) {
226 			retval = ENXIO;
227 			break;
228 		}
229 		if (IS_FC(isp)) {
230 			*(int *)addr = FCPARAM(isp, bus)->role;
231 		} else {
232 			*(int *)addr = SDPARAM(isp, bus)->role;
233 		}
234 		retval = 0;
235 		break;
236 	case ISP_SETROLE:
237 
238 		nr = *(int *)addr;
239 		bus = nr >> 8;
240 		if (bus < 0 || bus >= isp->isp_nchan) {
241 			retval = ENXIO;
242 			break;
243 		}
244 		nr &= 0xff;
245 		if (nr & ~(ISP_ROLE_INITIATOR|ISP_ROLE_TARGET)) {
246 			retval = EINVAL;
247 			break;
248 		}
249 		if (IS_FC(isp)) {
250 			*(int *)addr = FCPARAM(isp, bus)->role;
251 			FCPARAM(isp, bus)->role = nr;
252 		} else {
253 			*(int *)addr = SDPARAM(isp, bus)->role;
254 			SDPARAM(isp, bus)->role = nr;
255 		}
256 		retval = 0;
257 		break;
258 
259 	case ISP_RESETHBA:
260 		ISP_LOCK(isp);
261 		isp_reinit(isp, 0);
262 		ISP_UNLOCK(isp);
263 		retval = 0;
264 		break;
265 
266 	case ISP_RESCAN:
267 		if (IS_FC(isp)) {
268 			bus = *(int *)addr;
269 			if (bus < 0 || bus >= isp->isp_nchan) {
270 				retval = ENXIO;
271 				break;
272 			}
273 			ISP_LOCK(isp);
274 			if (isp_fc_runstate(isp, bus, 5 * 1000000)) {
275 				retval = EIO;
276 			} else {
277 				retval = 0;
278 			}
279 			ISP_UNLOCK(isp);
280 		}
281 		break;
282 
283 	case ISP_FC_LIP:
284 		if (IS_FC(isp)) {
285 			bus = *(int *)addr;
286 			if (bus < 0 || bus >= isp->isp_nchan) {
287 				retval = ENXIO;
288 				break;
289 			}
290 			ISP_LOCK(isp);
291 			if (isp_control(isp, ISPCTL_SEND_LIP, bus)) {
292 				retval = EIO;
293 			} else {
294 				retval = 0;
295 			}
296 			ISP_UNLOCK(isp);
297 		}
298 		break;
299 	case ISP_FC_GETDINFO:
300 	{
301 		struct isp_fc_device *ifc = (struct isp_fc_device *) addr;
302 		fcportdb_t *lp;
303 
304 		if (IS_SCSI(isp)) {
305 			break;
306 		}
307 		if (ifc->loopid >= MAX_FC_TARG) {
308 			retval = EINVAL;
309 			break;
310 		}
311 		lp = &FCPARAM(isp, ifc->chan)->portdb[ifc->loopid];
312 		if (lp->state == FC_PORTDB_STATE_VALID) {
313 			ifc->role = lp->roles;
314 			ifc->loopid = lp->handle;
315 			ifc->portid = lp->portid;
316 			ifc->node_wwn = lp->node_wwn;
317 			ifc->port_wwn = lp->port_wwn;
318 			retval = 0;
319 		} else {
320 			retval = ENODEV;
321 		}
322 		break;
323 	}
324 	case ISP_GET_STATS:
325 	{
326 		isp_stats_t *sp = (isp_stats_t *) addr;
327 
328 		ISP_MEMZERO(sp, sizeof (*sp));
329 		sp->isp_stat_version = ISP_STATS_VERSION;
330 		sp->isp_type = isp->isp_type;
331 		sp->isp_revision = isp->isp_revision;
332 		ISP_LOCK(isp);
333 		sp->isp_stats[ISP_INTCNT] = isp->isp_intcnt;
334 		sp->isp_stats[ISP_INTBOGUS] = isp->isp_intbogus;
335 		sp->isp_stats[ISP_INTMBOXC] = isp->isp_intmboxc;
336 		sp->isp_stats[ISP_INGOASYNC] = isp->isp_intoasync;
337 		sp->isp_stats[ISP_RSLTCCMPLT] = isp->isp_rsltccmplt;
338 		sp->isp_stats[ISP_FPHCCMCPLT] = isp->isp_fphccmplt;
339 		sp->isp_stats[ISP_RSCCHIWAT] = isp->isp_rscchiwater;
340 		sp->isp_stats[ISP_FPCCHIWAT] = isp->isp_fpcchiwater;
341 		ISP_UNLOCK(isp);
342 		retval = 0;
343 		break;
344 	}
345 	case ISP_CLR_STATS:
346 		ISP_LOCK(isp);
347 		isp->isp_intcnt = 0;
348 		isp->isp_intbogus = 0;
349 		isp->isp_intmboxc = 0;
350 		isp->isp_intoasync = 0;
351 		isp->isp_rsltccmplt = 0;
352 		isp->isp_fphccmplt = 0;
353 		isp->isp_rscchiwater = 0;
354 		isp->isp_fpcchiwater = 0;
355 		ISP_UNLOCK(isp);
356 		retval = 0;
357 		break;
358 	case ISP_FC_GETHINFO:
359 	{
360 		struct isp_hba_device *hba = (struct isp_hba_device *) addr;
361 		bus = hba->fc_channel;
362 
363 		if (bus < 0 || bus >= isp->isp_nchan) {
364 			retval = ENXIO;
365 			break;
366 		}
367 		hba->fc_fw_major = ISP_FW_MAJORX(isp->isp_fwrev);
368 		hba->fc_fw_minor = ISP_FW_MINORX(isp->isp_fwrev);
369 		hba->fc_fw_micro = ISP_FW_MICROX(isp->isp_fwrev);
370 		hba->fc_nchannels = isp->isp_nchan;
371 		hba->fc_nports = isp->isp_nchan;	/* XXXX 24XX STUFF? XXX */
372 		if (IS_FC(isp)) {
373 			hba->fc_speed = FCPARAM(isp, bus)->isp_gbspeed;
374 			hba->fc_topology = FCPARAM(isp, bus)->isp_topo + 1;
375 			hba->fc_loopid = FCPARAM(isp, bus)->isp_loopid;
376 			hba->nvram_node_wwn = FCPARAM(isp, bus)->isp_wwnn_nvram;
377 			hba->nvram_port_wwn = FCPARAM(isp, bus)->isp_wwpn_nvram;
378 			hba->active_node_wwn = FCPARAM(isp, bus)->isp_wwnn;
379 			hba->active_port_wwn = FCPARAM(isp, bus)->isp_wwpn;
380 		} else {
381 			hba->fc_speed = 0;
382 			hba->fc_topology = 0;
383 			hba->nvram_node_wwn = 0ull;
384 			hba->nvram_port_wwn = 0ull;
385 			hba->active_node_wwn = 0ull;
386 			hba->active_port_wwn = 0ull;
387 		}
388 		retval = 0;
389 		break;
390 	}
391 	case ISP_TSK_MGMT:
392 	{
393 		int needmarker;
394 		struct isp_fc_tsk_mgmt *fct = (struct isp_fc_tsk_mgmt *) addr;
395 		uint16_t loopid;
396 		mbreg_t mbs;
397 
398 		if (IS_SCSI(isp)) {
399 			break;
400 		}
401 
402 		bus = fct->chan;
403 		if (bus < 0 || bus >= isp->isp_nchan) {
404 			retval = ENXIO;
405 			break;
406 		}
407 
408 		memset(&mbs, 0, sizeof (mbs));
409 		needmarker = retval = 0;
410 		loopid = fct->loopid;
411 		if (ISP_CAP_2KLOGIN(isp) == 0) {
412 			loopid <<= 8;
413 		}
414 		switch (fct->action) {
415 		case IPT_CLEAR_ACA:
416 			mbs.param[0] = MBOX_CLEAR_ACA;
417 			mbs.param[1] = loopid;
418 			mbs.param[2] = fct->lun;
419 			break;
420 		case IPT_TARGET_RESET:
421 			mbs.param[0] = MBOX_TARGET_RESET;
422 			mbs.param[1] = loopid;
423 			needmarker = 1;
424 			break;
425 		case IPT_LUN_RESET:
426 			mbs.param[0] = MBOX_LUN_RESET;
427 			mbs.param[1] = loopid;
428 			mbs.param[2] = fct->lun;
429 			needmarker = 1;
430 			break;
431 		case IPT_CLEAR_TASK_SET:
432 			mbs.param[0] = MBOX_CLEAR_TASK_SET;
433 			mbs.param[1] = loopid;
434 			mbs.param[2] = fct->lun;
435 			needmarker = 1;
436 			break;
437 		case IPT_ABORT_TASK_SET:
438 			mbs.param[0] = MBOX_ABORT_TASK_SET;
439 			mbs.param[1] = loopid;
440 			mbs.param[2] = fct->lun;
441 			needmarker = 1;
442 			break;
443 		default:
444 			retval = EINVAL;
445 			break;
446 		}
447 		if (retval == 0) {
448 			if (needmarker) {
449 				FCPARAM(isp, bus)->sendmarker = 1;
450 			}
451 			ISP_LOCK(isp);
452 			retval = isp_control(isp, ISPCTL_RUN_MBOXCMD, &mbs);
453 			ISP_UNLOCK(isp);
454 			if (retval) {
455 				retval = EIO;
456 			}
457 		}
458 		break;
459 	}
460 	case ISP_FC_GETDLIST:
461 	{
462 		isp_dlist_t local, *ua;
463 		uint16_t nph, nphe, count, channel, lim;
464 		struct wwnpair pair, *uptr;
465 
466 		if (IS_SCSI(isp)) {
467 			retval = EINVAL;
468 			break;
469 		}
470 
471 		ua = *(isp_dlist_t **)addr;
472 		if (copyin(ua, &local, sizeof (isp_dlist_t))) {
473 			retval = EFAULT;
474 			break;
475 		}
476 		lim = local.count;
477 		channel = local.channel;
478 		if (channel >= isp->isp_nchan) {
479 			retval = EINVAL;
480 			break;
481 		}
482 
483 		ua = *(isp_dlist_t **)addr;
484 		uptr = &ua->wwns[0];
485 
486 		if (ISP_CAP_2KLOGIN(isp)) {
487 			nphe = NPH_MAX_2K;
488 		} else {
489 			nphe = NPH_MAX;
490 		}
491 		for (count = 0, nph = 0; count < lim && nph != nphe; nph++) {
492 			ISP_LOCK(isp);
493 			retval = isp_control(isp, ISPCTL_GET_NAMES, channel,
494 			    nph, &pair.wwnn, &pair.wwpn);
495 			ISP_UNLOCK(isp);
496 			if (retval || (pair.wwpn == INI_NONE &&
497 			    pair.wwnn == INI_NONE)) {
498 				retval = 0;
499 				continue;
500 			}
501 			if (copyout(&pair, (void *)uptr++, sizeof (pair))) {
502 				retval = EFAULT;
503 				break;
504 			}
505 			count++;
506 		}
507 		if (retval == 0) {
508 			if (copyout(&count, (void *)&ua->count,
509 			    sizeof (count))) {
510 				retval = EFAULT;
511 			}
512 		}
513 		break;
514 	}
515 	case SCBUSIORESET:
516 		ISP_LOCK(isp);
517 		if (isp_control(isp, ISPCTL_RESET_BUS, &chan->chan_channel)) {
518 			retval = EIO;
519 		} else {
520 			retval = 0;
521 		}
522 		ISP_UNLOCK(isp);
523 		break;
524 	default:
525 		break;
526 	}
527 	return (retval);
528 }
529 
530 static void
531 ispcmd(struct ispsoftc *isp, XS_T *xs)
532 {
533 	volatile uint8_t ombi;
534 	int lim, chan;
535 
536 	ISP_LOCK(isp);
537 	if (isp->isp_state < ISP_RUNSTATE) {
538 		ISP_DISABLE_INTS(isp);
539 		isp_init(isp);
540 		if (isp->isp_state != ISP_INITSTATE) {
541 			ISP_ENABLE_INTS(isp);
542 			ISP_UNLOCK(isp);
543 			isp_prt(isp, ISP_LOGERR, "isp not at init state");
544 			XS_SETERR(xs, HBA_BOTCH);
545 			scsipi_done(xs);
546 			return;
547 		}
548 		isp->isp_state = ISP_RUNSTATE;
549 		ISP_ENABLE_INTS(isp);
550 	}
551 	chan = XS_CHANNEL(xs);
552 
553 	/*
554 	 * Handle the case of a FC card where the FC thread hasn't
555 	 * fired up yet and we don't yet have a known loop state.
556 	 */
557 	if (IS_FC(isp) && (FCPARAM(isp, chan)->isp_fwstate != FW_READY ||
558 	    FCPARAM(isp, chan)->isp_loopstate != LOOP_READY) &&
559 	    isp->isp_osinfo.thread == NULL) {
560 		ombi = isp->isp_osinfo.mbox_sleep_ok != 0;
561 		int delay_time;
562 
563 		if (xs->xs_control & XS_CTL_POLL) {
564 			isp->isp_osinfo.mbox_sleep_ok = 0;
565 		}
566 
567 		if (isp->isp_osinfo.loop_checked == 0) {
568 			delay_time = 10 * 1000000;
569 			isp->isp_osinfo.loop_checked = 1;
570 		} else {
571 			delay_time = 250000;
572 		}
573 
574 		if (isp_fc_runstate(isp, XS_CHANNEL(xs), delay_time) != 0) {
575 			if (xs->xs_control & XS_CTL_POLL) {
576 				isp->isp_osinfo.mbox_sleep_ok = ombi;
577 			}
578 			if (FCPARAM(isp, XS_CHANNEL(xs))->loop_seen_once == 0) {
579 				XS_SETERR(xs, HBA_SELTIMEOUT);
580 				scsipi_done(xs);
581 				ISP_UNLOCK(isp);
582 				return;
583 			}
584 			/*
585 			 * Otherwise, fall thru to be queued up for later.
586 			 */
587 		} else {
588 			int wasblocked =
589 			    (isp->isp_osinfo.blocked || isp->isp_osinfo.paused);
590 			isp->isp_osinfo.blocked = isp->isp_osinfo.paused = 0;
591 			if (wasblocked) {
592 				isp_prt(isp, ISP_LOGSANCFG|ISP_LOGDEBUG0,
593 				    "THAW QUEUES @ LINE %d", __LINE__);
594 				scsipi_channel_thaw(&isp->isp_osinfo.chan[chan],
595 				    1);
596 			}
597 		}
598 		if (xs->xs_control & XS_CTL_POLL) {
599 			isp->isp_osinfo.mbox_sleep_ok = ombi;
600 		}
601 	}
602 
603 	if (isp->isp_osinfo.paused) {
604 		isp_prt(isp, ISP_LOGWARN, "I/O while paused");
605 		xs->error = XS_RESOURCE_SHORTAGE;
606 		scsipi_done(xs);
607 		ISP_UNLOCK(isp);
608 		return;
609 	}
610 	if (isp->isp_osinfo.blocked) {
611 		isp_prt(isp, ISP_LOGWARN,
612 		    "I/O while blocked with retries %d", xs, xs->xs_retries);
613 		if (xs->xs_retries) {
614 			xs->error = XS_REQUEUE;
615 			xs->xs_retries--;
616 		} else {
617 			XS_SETERR(xs, HBA_SELTIMEOUT);
618 		}
619 		scsipi_done(xs);
620 		ISP_UNLOCK(isp);
621 		return;
622 	}
623 
624 	if (xs->xs_control & XS_CTL_POLL) {
625 		ombi = isp->isp_osinfo.mbox_sleep_ok;
626 		isp->isp_osinfo.mbox_sleep_ok = 0;
627 	}
628 
629 	switch (isp_start(xs)) {
630 	case CMD_QUEUED:
631 		if (IS_FC(isp) && isp->isp_osinfo.wwns[XS_TGT(xs)] == 0) {
632 			fcparam *fcp = FCPARAM(isp, XS_CHANNEL(xs));
633 			int dbidx = fcp->isp_dev_map[XS_TGT(xs)] - 1;
634 			device_t dev = xs->xs_periph->periph_dev;
635 
636 			if (dbidx >= 0 && dev &&
637 			    prop_dictionary_set_uint64(device_properties(dev),
638 			    "port-wwn", fcp->portdb[dbidx].port_wwn) == TRUE) {
639 				isp->isp_osinfo.wwns[XS_TGT(xs)] =
640 				    fcp->portdb[dbidx].port_wwn;
641 			}
642 		}
643 		if (xs->xs_control & XS_CTL_POLL) {
644 			isp_polled_cmd_wait(isp, xs);
645 			isp->isp_osinfo.mbox_sleep_ok = ombi;
646 		} else if (xs->timeout) {
647 			callout_reset(&xs->xs_callout, _XT(xs), isp_dog, xs);
648 		}
649 		break;
650 	case CMD_EAGAIN:
651 		isp->isp_osinfo.paused = 1;
652 		xs->error = XS_RESOURCE_SHORTAGE;
653 		isp_prt(isp, ISP_LOGSANCFG|ISP_LOGDEBUG0,
654 		    "FREEZE QUEUES @ LINE %d", __LINE__);
655 		for (chan = 0; chan < isp->isp_nchan; chan++) {
656 			scsipi_channel_freeze(&isp->isp_osinfo.chan[chan], 1);
657 		}
658 		scsipi_done(xs);
659 		break;
660 	case CMD_RQLATER:
661 		/*
662 		 * We can only get RQLATER from FC devices (1 channel only)
663 		 *
664 		 * If we've never seen loop up, see if we've been down longer
665 		 * than quickboot time; otherwise wait the loop down limit time.
666 		 * If so, then we start giving up on commands.
667 		 */
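		/*
		 * Illustrative numbers, using the defaults set earlier in
		 * this file (isp_quickboot_time = 15, loop_down_limit = 300):
		 * a channel that has never seen loop up starts getting
		 * SELTIMEOUT treatment once about 15 seconds of down time
		 * have accumulated, while one that has seen loop up keeps
		 * requeueing commands for up to about 300 seconds.
		 */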
668 		if (FCPARAM(isp, XS_CHANNEL(xs))->loop_seen_once == 0) {
669 			lim = isp_quickboot_time;
670 		} else {
671 			lim = isp->isp_osinfo.loop_down_limit;
672 		}
673 		if (isp->isp_osinfo.loop_down_time >= lim) {
674 			isp_prt(isp, ISP_LOGSANCFG|ISP_LOGDEBUG0,
675 			    "RQLATER->SELTIMEOUT for %d (%d >= %d)", XS_TGT(xs),
676 			    isp->isp_osinfo.loop_down_time, lim);
677 			XS_SETERR(xs, HBA_SELTIMEOUT);
678 			scsipi_done(xs);
679 			break;
680 		}
681 		if (isp->isp_osinfo.blocked == 0) {
682 			isp->isp_osinfo.blocked = 1;
683 			scsipi_channel_freeze(&isp->isp_osinfo.chan[chan], 1);
684 			isp_prt(isp, ISP_LOGSANCFG|ISP_LOGDEBUG0,
685 			    "FREEZE QUEUES @ LINE %d", __LINE__);
686 		} else {
687 			isp_prt(isp, ISP_LOGSANCFG|ISP_LOGDEBUG0,
688 			    "RQLATER WITH FROZEN QUEUES @ LINE %d", __LINE__);
689 		}
690 		xs->error = XS_REQUEUE;
691 		scsipi_done(xs);
692 		break;
693 	case CMD_COMPLETE:
694 		scsipi_done(xs);
695 		break;
696 	}
697 	ISP_UNLOCK(isp);
698 }
699 
700 static void
701 isprequest(struct scsipi_channel *chan, scsipi_adapter_req_t req, void *arg)
702 {
703 	struct ispsoftc *isp = device_private(chan->chan_adapter->adapt_dev);
704 
705 	switch (req) {
706 	case ADAPTER_REQ_RUN_XFER:
707 		ispcmd(isp, (XS_T *) arg);
708 		break;
709 
710 	case ADAPTER_REQ_GROW_RESOURCES:
711 		/* Not supported. */
712 		break;
713 
714 	case ADAPTER_REQ_SET_XFER_MODE:
715 	if (IS_SCSI(isp)) {
716 		struct scsipi_xfer_mode *xm = arg;
717 		int dflags = 0;
718 		sdparam *sdp = SDPARAM(isp, chan->chan_channel);
719 
720 		if (xm->xm_mode & PERIPH_CAP_TQING)
721 			dflags |= DPARM_TQING;
722 		if (xm->xm_mode & PERIPH_CAP_WIDE16)
723 			dflags |= DPARM_WIDE;
724 		if (xm->xm_mode & PERIPH_CAP_SYNC)
725 			dflags |= DPARM_SYNC;
726 		ISP_LOCK(isp);
727 		sdp->isp_devparam[xm->xm_target].goal_flags |= dflags;
728 		dflags = sdp->isp_devparam[xm->xm_target].goal_flags;
729 		sdp->isp_devparam[xm->xm_target].dev_update = 1;
730 		sdp->update = 1;
731 		ISP_UNLOCK(isp);
732 		isp_prt(isp, ISP_LOGDEBUG1,
733 		    "isprequest: device flags 0x%x for %d.%d.X",
734 		    dflags, chan->chan_channel, xm->xm_target);
735 		break;
736 	}
737 	default:
738 		break;
739 	}
740 }
741 
742 static void
743 isp_polled_cmd_wait(struct ispsoftc *isp, XS_T *xs)
744 {
745 	int infinite = 0, mswait;
746 
747 	/*
748 	 * If we can't use interrupts, poll on completion.
749 	 */
750 	if ((mswait = XS_TIME(xs)) == 0) {
751 		infinite = 1;
752 	}
753 
754 	while (mswait || infinite) {
755 		uint32_t isr;
756 		uint16_t sema, mbox;
757 		if (ISP_READ_ISR(isp, &isr, &sema, &mbox)) {
758 			isp_intr(isp, isr, sema, mbox);
759 			if (XS_CMD_DONE_P(xs)) {
760 				break;
761 			}
762 		}
763 		ISP_DELAY(1000);
764 		mswait -= 1;
765 	}
766 
767 	/*
768 	 * If no other error occurred but we still didn't finish,
769 	 * something bad happened, so abort the command.
770 	 */
771 	if (XS_CMD_DONE_P(xs) == 0) {
772 		if (isp_control(isp, ISPCTL_ABORT_CMD, xs)) {
773 			isp_reinit(isp, 0);
774 		}
775 		if (XS_NOERR(xs)) {
776 			isp_prt(isp, ISP_LOGERR, "polled command timed out");
777 			XS_SETERR(xs, HBA_BOTCH);
778 		}
779 	}
780 	scsipi_done(xs);
781 }
782 
783 void
784 isp_done(XS_T *xs)
785 {
786 	if (XS_CMD_WDOG_P(xs) == 0) {
787 		struct ispsoftc *isp = XS_ISP(xs);
788 		callout_stop(&xs->xs_callout);
789 		if (XS_CMD_GRACE_P(xs)) {
790 			isp_prt(isp, ISP_LOGDEBUG1,
791 			    "finished command on borrowed time");
792 		}
793 		XS_CMD_S_CLEAR(xs);
794 		/*
795 		 * Fixup- if we get a QFULL, we need
796 		 * to set XS_BUSY as the error.
797 		 */
798 		if (xs->status == SCSI_QUEUE_FULL) {
799 			xs->error = XS_BUSY;
800 		}
801 		if (isp->isp_osinfo.paused) {
802 			int i;
803 			isp->isp_osinfo.paused = 0;
804 			isp_prt(isp, ISP_LOGSANCFG|ISP_LOGDEBUG0,
805 			    "THAW QUEUES @ LINE %d", __LINE__);
806 			for (i = 0; i < isp->isp_nchan; i++) {
807 				scsipi_channel_timed_thaw(&isp->isp_osinfo.chan[i]);
808 			}
809 		}
810 		if (xs->error == XS_DRIVER_STUFFUP) {
811 			isp_prt(isp, ISP_LOGERR,
812 			    "BOTCHED cmd for %d.%d.%d cmd 0x%x datalen %ld",
813 			    XS_CHANNEL(xs), XS_TGT(xs), XS_LUN(xs),
814 			    XS_CDBP(xs)[0], (long) XS_XFRLEN(xs));
815 		}
816 		scsipi_done(xs);
817 	}
818 }
819 
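/*
 * Per-command watchdog. On the first expiry for a command that is still
 * outstanding we mark it with a grace period, push a SYNC_ALL marker to
 * flush the request queue, and re-arm ourselves for one more second. If
 * the command still isn't done when we fire again, we abort it, release
 * its handle and DMA resources, and complete it with XS_TIMEOUT.
 */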
820 static void
821 isp_dog(void *arg)
822 {
823 	XS_T *xs = arg;
824 	struct ispsoftc *isp = XS_ISP(xs);
825 	uint32_t handle;
826 	int sok;
827 
828 
829 	ISP_ILOCK(isp);
830 	sok = isp->isp_osinfo.mbox_sleep_ok;
831 	isp->isp_osinfo.mbox_sleep_ok = 0;
832 	/*
833 	 * We've decided this command is dead. Make sure we're not trying
834 	 * to kill a command that's already dead by getting its handle
835 	 * and seeing whether it's still alive.
836 	 */
837 	handle = isp_find_handle(isp, xs);
838 	if (handle) {
839 		uint32_t isr;
840 		uint16_t mbox, sema;
841 
842 		if (XS_CMD_DONE_P(xs)) {
843 			isp_prt(isp, ISP_LOGDEBUG1,
844 			    "watchdog found done cmd (handle 0x%x)", handle);
845 			goto out;
846 		}
847 
848 		if (XS_CMD_WDOG_P(xs)) {
849 			isp_prt(isp, ISP_LOGDEBUG1,
850 			    "recursive watchdog (handle 0x%x)", handle);
851 			goto out;
852 		}
853 
854 		XS_CMD_S_WDOG(xs);
855 
856 		if (ISP_READ_ISR(isp, &isr, &sema, &mbox)) {
857 			isp_intr(isp, isr, sema, mbox);
858 
859 		}
860 		if (XS_CMD_DONE_P(xs)) {
861 			isp_prt(isp, ISP_LOGDEBUG1,
862 			    "watchdog cleanup for handle 0x%x", handle);
863 			XS_CMD_C_WDOG(xs);
864 			isp_done(xs);
865 		} else if (XS_CMD_GRACE_P(xs)) {
866 			isp_prt(isp, ISP_LOGDEBUG1,
867 			    "watchdog timeout for handle 0x%x", handle);
868 			/*
869 			 * Make sure the command is *really* dead before we
870 			 * release the handle (and DMA resources) for reuse.
871 			 */
872 			(void) isp_control(isp, ISPCTL_ABORT_CMD, arg);
873 
874 			/*
875 			 * After this point, the command is really dead.
876 			 */
877 			if (XS_XFRLEN(xs)) {
878 				ISP_DMAFREE(isp, xs, handle);
879 			}
880 			isp_destroy_handle(isp, handle);
881 			XS_SETERR(xs, XS_TIMEOUT);
882 			XS_CMD_S_CLEAR(xs);
883 			isp_done(xs);
884 		} else {
885 			void *qe;
886 			isp_marker_t local, *mp = &local;
887 			isp_prt(isp, ISP_LOGDEBUG2,
888 			    "possible command timeout on handle %x", handle);
889 			XS_CMD_C_WDOG(xs);
890 			callout_reset(&xs->xs_callout, hz, isp_dog, xs);
891 			qe = isp_getrqentry(isp);
892 			if (qe == NULL)
893 				goto out;
894 			XS_CMD_S_GRACE(xs);
895 			ISP_MEMZERO((void *) mp, sizeof (*mp));
896 			mp->mrk_header.rqs_entry_count = 1;
897 			mp->mrk_header.rqs_entry_type = RQSTYPE_MARKER;
898 			mp->mrk_modifier = SYNC_ALL;
899 			mp->mrk_target = XS_CHANNEL(xs) << 7;
900 			isp_put_marker(isp, mp, qe);
901 			ISP_SYNC_REQUEST(isp);
902 		}
903 	} else {
904 		isp_prt(isp, ISP_LOGDEBUG0, "watchdog with no command");
905 	}
906 out:
907 	isp->isp_osinfo.mbox_sleep_ok = sok;
908 	ISP_IUNLOCK(isp);
909 }
910 
911 /*
912  * Gone Device Timer Function- when we have decided that a device has gone
913  * away, we wait a specific period of time prior to telling the OS it has
914  * gone away.
915  *
916  * This timer function fires once a second and then scans the port database
917  * for devices that are marked dead but still have a virtual target assigned.
918  * We decrement a counter for that port database entry, and when it hits zero,
919  * we tell the OS the device has gone away.
920  */
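/*
 * Countdown example (hypothetical value): if isp_osinfo.gone_device_time
 * was 30 when ISPASYNC_DEV_GONE marked an entry as a zombie, its
 * new_reserved field starts at 30 and is decremented once per one-second
 * firing below, so the OS hears about the departure roughly 30 seconds
 * later unless the device reappears first.
 */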
921 static void
922 isp_gdt(void *arg)
923 {
924 	ispsoftc_t *isp = arg;
925 	fcportdb_t *lp;
926 	int dbidx, tgt, more_to_do = 0;
927 
928 	isp_prt(isp, ISP_LOGDEBUG0, "GDT timer expired");
929 	ISP_LOCK(isp);
930 	for (dbidx = 0; dbidx < MAX_FC_TARG; dbidx++) {
931 		lp = &FCPARAM(isp, 0)->portdb[dbidx];
932 
933 		if (lp->state != FC_PORTDB_STATE_ZOMBIE) {
934 			continue;
935 		}
936 		if (lp->dev_map_idx == 0) {
937 			continue;
938 		}
939 		if (lp->new_reserved == 0) {
940 			continue;
941 		}
942 		lp->new_reserved -= 1;
943 		if (lp->new_reserved != 0) {
944 			more_to_do++;
945 			continue;
946 		}
947 		tgt = lp->dev_map_idx - 1;
948 		FCPARAM(isp, 0)->isp_dev_map[tgt] = 0;
949 		lp->dev_map_idx = 0;
950 		lp->state = FC_PORTDB_STATE_NIL;
951 		isp_prt(isp, ISP_LOGCONFIG, prom3, lp->portid, tgt,
952 		    "Gone Device Timeout");
953 		isp_make_gone(isp, tgt);
954 	}
955 	if (more_to_do) {
956 		callout_schedule(&isp->isp_osinfo.gdt, hz);
957 	} else {
958 		isp->isp_osinfo.gdt_running = 0;
959 		isp_prt(isp, ISP_LOGSANCFG|ISP_LOGDEBUG0,
960 		    "stopping Gone Device Timer");
961 	}
962 	ISP_UNLOCK(isp);
963 }
964 
965 /*
966  * Loop Down Timer Function- when the loop goes down, a timer is started,
967  * and after it expires we come here, take all probational devices that
968  * the OS knows about, and tell the OS that they've gone away.
969  *
970  * We don't clear the devices out of our port database because, when the
971  * loop comes back up, we have to do some actual cleanup with the chip at that
972  * point (implicit PLOGO, e.g., to get the chip's port database state right).
973  */
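/*
 * This callout is armed in two places: at configuration time, for
 * isp_quickboot_time seconds (15 by default), if the loop was not yet
 * ready; and for loop_down_limit seconds (300, as set in isp_attach)
 * when a LIP, loop reset, or loop down event is reported.
 */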
974 static void
975 isp_ldt(void *arg)
976 {
977 	ispsoftc_t *isp = arg;
978 	fcportdb_t *lp;
979 	int dbidx, tgt;
980 
981 	isp_prt(isp, ISP_LOGSANCFG|ISP_LOGDEBUG0, "Loop Down Timer expired");
982 	ISP_LOCK(isp);
983 
984 	/*
985 	 * Notify the OS of all targets that we now consider to have departed.
986 	 */
987 	for (dbidx = 0; dbidx < MAX_FC_TARG; dbidx++) {
988 		lp = &FCPARAM(isp, 0)->portdb[dbidx];
989 
990 		if (lp->state != FC_PORTDB_STATE_PROBATIONAL) {
991 			continue;
992 		}
993 		if (lp->dev_map_idx == 0) {
994 			continue;
995 		}
996 
997 		/*
998 		 * XXX: CLEAN UP AND COMPLETE ANY PENDING COMMANDS FIRST!
999 		 */
1000 
1001 		/*
1002 		 * Mark that we've announced that this device is gone....
1003 		 */
1004 		lp->reserved = 1;
1005 
1006 		/*
1007 		 * but *don't* change the state of the entry. Just clear
1008 		 * any target id stuff and announce to the midlayer that the
1009 		 * device is gone. This way any necessary PLOGO stuff
1010 		 * will happen when loop comes back up.
1011 		 */
1012 
1013 		tgt = lp->dev_map_idx - 1;
1014 		FCPARAM(isp, 0)->isp_dev_map[tgt] = 0;
1015 		lp->dev_map_idx = 0;
1016 		isp_prt(isp, ISP_LOGCONFIG, prom3, lp->portid, tgt,
1017 		    "Loop Down Timeout");
1018 		isp_make_gone(isp, tgt);
1019 	}
1020 
1021 	/*
1022 	 * The loop down timer has expired. Wake up the kthread
1023 	 * to notice that fact (or make it false).
1024 	 */
1025 	isp->isp_osinfo.loop_down_time = isp->isp_osinfo.loop_down_limit+1;
1026 	wakeup(&isp->isp_osinfo.thread);
1027 	ISP_UNLOCK(isp);
1028 }
1029 
1030 static void
1031 isp_make_here(ispsoftc_t *isp, int tgt)
1032 {
1033 	isp_prt(isp, ISP_LOGINFO, "target %d has arrived", tgt);
1034 }
1035 
1036 static void
1037 isp_make_gone(ispsoftc_t *isp, int tgt)
1038 {
1039 	isp_prt(isp, ISP_LOGINFO, "target %d has departed", tgt);
1040 }
1041 
1042 static void
1043 isp_fc_worker(void *arg)
1044 {
1045 	void scsipi_run_queue(struct scsipi_channel *);
1046 	ispsoftc_t *isp = arg;
1047 	int slp = 0;
1048 	int chan = 0;
1049 
1050 	int s = splbio();
1051 	/*
1052 	 * The first loop is for our usage where we have yet to have
1053 	 * gotten good fibre channel state.
1054 	 */
1055 	while (isp->isp_osinfo.thread != NULL) {
1056 		int sok, lb, lim;
1057 
1058 		isp_prt(isp, ISP_LOGSANCFG|ISP_LOGDEBUG0, "checking FC state");
1059 		sok = isp->isp_osinfo.mbox_sleep_ok;
1060 		isp->isp_osinfo.mbox_sleep_ok = 1;
1061 		lb = isp_fc_runstate(isp, chan, 250000);
1062 		isp->isp_osinfo.mbox_sleep_ok = sok;
1063 		if (lb) {
1064 			/*
1065 			 * Increment loop down time by the last sleep interval
1066 			 */
1067 			isp->isp_osinfo.loop_down_time += slp;
1068 
1069 			if (lb < 0) {
1070 				isp_prt(isp, ISP_LOGSANCFG|ISP_LOGDEBUG0,
1071 				    "FC loop not up (down count %d)",
1072 				    isp->isp_osinfo.loop_down_time);
1073 			} else {
1074 				isp_prt(isp, ISP_LOGSANCFG|ISP_LOGDEBUG0,
1075 				    "FC got to %d (down count %d)",
1076 				    lb, isp->isp_osinfo.loop_down_time);
1077 			}
1078 
1079 
1080 			/*
1081 			 * If we've never seen loop up and we've waited longer
1082 			 * than quickboot time, or we've seen loop up but we've
1083 			 * waited longer than loop_down_limit, give up and go
1084 			 * to sleep until loop comes up.
1085 			 */
1086 			if (FCPARAM(isp, 0)->loop_seen_once == 0) {
1087 				lim = isp_quickboot_time;
1088 			} else {
1089 				lim = isp->isp_osinfo.loop_down_limit;
1090 			}
1091 			if (isp->isp_osinfo.loop_down_time >= lim) {
1092 				/*
1093 				 * If we're now past our limit, release
1094 				 * the queues and let them come in and
1095 				 * either get HBA_SELTIMEOUT or cause
1096 				 * another freeze.
1097 				 */
1098 				isp->isp_osinfo.blocked = 1;
1099 				slp = 0;
1100 			} else if (isp->isp_osinfo.loop_down_time < 10) {
1101 				slp = 1;
1102 			} else if (isp->isp_osinfo.loop_down_time < 30) {
1103 				slp = 5;
1104 			} else if (isp->isp_osinfo.loop_down_time < 60) {
1105 				slp = 10;
1106 			} else if (isp->isp_osinfo.loop_down_time < 120) {
1107 				slp = 20;
1108 			} else {
1109 				slp = 30;
1110 			}
1111 
1112 		} else {
1113 			isp_prt(isp, ISP_LOGSANCFG|ISP_LOGDEBUG0,
1114 			    "FC state OK");
1115 			isp->isp_osinfo.loop_down_time = 0;
1116 			slp = 0;
1117 			isp_prt(isp, ISP_LOGSANCFG|ISP_LOGDEBUG0,
1118 			    "THAW QUEUES @ LINE %d", __LINE__);
1119 			scsipi_channel_thaw(&isp->isp_osinfo.chan[chan], 1);
1120 		}
1121 
1122 		/*
1123 		 * If we'd frozen the queues, unfreeze them now so that
1124 		 * we can start getting commands. If the FC state isn't
1125 		 * okay yet, they'll hit that in isp_start which will
1126 		 * freeze the queues again.
1127 		 */
1128 		if (isp->isp_osinfo.blocked) {
1129 			isp->isp_osinfo.blocked = 0;
1130 			isp_prt(isp, ISP_LOGSANCFG|ISP_LOGDEBUG0,
1131 			    "THAW QUEUES @ LINE %d", __LINE__);
1132 			scsipi_channel_thaw(&isp->isp_osinfo.chan[chan], 1);
1133 		}
1134 		isp_prt(isp, ISP_LOGSANCFG|ISP_LOGDEBUG0, "sleep time %d", slp);
1135 		tsleep(&isp->isp_osinfo.thread, PRIBIO, "ispf", slp * hz);
1136 
1137 		/*
1138 		 * If slp is zero, we're waking up for the first time after
1139 		 * things have been okay. In this case, we set a deferral state
1140 		 * for all commands and delay hysteresis seconds before starting
1141 		 * the FC state evaluation. This gives the loop/fabric a chance
1142 		 * to settle.
1143 		 */
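		/*
		 * With the isp_fabric_hysteresis default of 5 set at the top
		 * of this file and a hypothetical hz of 100, that works out
		 * to a 500 tick (5 second) settling delay before the next FC
		 * state evaluation.
		 */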
1144 		if (slp == 0 && isp_fabric_hysteresis) {
1145 			isp_prt(isp, ISP_LOGSANCFG|ISP_LOGDEBUG0,
1146 			    "sleep hysteresis tick time %d",
1147 			    isp_fabric_hysteresis * hz);
1148 			(void) tsleep(&isp_fabric_hysteresis, PRIBIO, "ispT",
1149 			    (isp_fabric_hysteresis * hz));
1150 		}
1151 	}
1152 	splx(s);
1153 
1154 	/* In case parent is waiting for us to exit. */
1155 	wakeup(&isp->isp_osinfo.thread);
1156 	kthread_exit(0);
1157 }
1158 
1159 /*
1160  * Free any associated resources prior to decommissioning and
1161  * set the card to a known state (so it doesn't wake up and kick
1162  * us when we aren't expecting it to).
1163  *
1164  * Locks are held before coming here.
1165  */
1166 void
1167 isp_uninit(struct ispsoftc *isp)
1168 {
1169 	isp_lock(isp);
1170 	/*
1171 	 * Leave with interrupts disabled.
1172 	 */
1173 	ISP_DISABLE_INTS(isp);
1174 	isp_unlock(isp);
1175 }
1176 
1177 void
1178 isp_async(struct ispsoftc *isp, ispasync_t cmd, ...)
1179 {
1180 	int bus, tgt;
1181 	const char *msg = NULL;
1182 	static const char prom[] =
1183 	    "PortID 0x%06x handle 0x%x role %s %s\n"
1184 	    "      WWNN 0x%08x%08x WWPN 0x%08x%08x";
1185 	static const char prom2[] =
1186 	    "PortID 0x%06x handle 0x%x role %s %s tgt %u\n"
1187 	    "      WWNN 0x%08x%08x WWPN 0x%08x%08x";
1188 	fcportdb_t *lp;
1189 	va_list ap;
1190 
1191 	switch (cmd) {
1192 	case ISPASYNC_NEW_TGT_PARAMS:
1193 	if (IS_SCSI(isp)) {
1194 		sdparam *sdp;
1195 		int flags;
1196 		struct scsipi_xfer_mode xm;
1197 
1198 		va_start(ap, cmd);
1199 		bus = va_arg(ap, int);
1200 		tgt = va_arg(ap, int);
1201 		va_end(ap);
1202 		sdp = SDPARAM(isp, bus);
1203 		flags = sdp->isp_devparam[tgt].actv_flags;
1204 
1205 		xm.xm_mode = 0;
1206 		xm.xm_period = sdp->isp_devparam[tgt].actv_period;
1207 		xm.xm_offset = sdp->isp_devparam[tgt].actv_offset;
1208 		xm.xm_target = tgt;
1209 
1210 		if ((flags & DPARM_SYNC) && xm.xm_period && xm.xm_offset)
1211 			xm.xm_mode |= PERIPH_CAP_SYNC;
1212 		if (flags & DPARM_WIDE)
1213 			xm.xm_mode |= PERIPH_CAP_WIDE16;
1214 		if (flags & DPARM_TQING)
1215 			xm.xm_mode |= PERIPH_CAP_TQING;
1216 		scsipi_async_event(&isp->isp_osinfo.chan[bus],
1217 		    ASYNC_EVENT_XFER_MODE, &xm);
1218 		break;
1219 	}
1220 	case ISPASYNC_BUS_RESET:
1221 		va_start(ap, cmd);
1222 		bus = va_arg(ap, int);
1223 		va_end(ap);
1224 		isp_prt(isp, ISP_LOGINFO, "SCSI bus %d reset detected", bus);
1225 		scsipi_async_event(&isp->isp_osinfo.chan[bus],
1226 		    ASYNC_EVENT_RESET, NULL);
1227 		break;
1228 	case ISPASYNC_LIP:
1229 		if (msg == NULL) {
1230 			msg = "LIP Received";
1231 		}
1232 		/* FALLTHROUGH */
1233 	case ISPASYNC_LOOP_RESET:
1234 		if (msg == NULL) {
1235 			msg = "LOOP Reset Received";
1236 		}
1237 		/* FALLTHROUGH */
1238 	case ISPASYNC_LOOP_DOWN:
1239 		if (msg == NULL) {
1240 			msg = "Loop DOWN";
1241 		}
1242 		va_start(ap, cmd);
1243 		bus = va_arg(ap, int);
1244 		va_end(ap);
1245 
1246 		/*
1247 		 * Don't do queue freezes or blockage until we have the
1248 		 * thread running and interrupts that can unfreeze/unblock us.
1249 		 */
1250 		if (isp->isp_osinfo.mbox_sleep_ok &&
1251 		    isp->isp_osinfo.blocked == 0 &&
1252 		    isp->isp_osinfo.thread) {
1253 			isp->isp_osinfo.blocked = 1;
1254 			isp_prt(isp, ISP_LOGSANCFG|ISP_LOGDEBUG0,
1255 			    "FREEZE QUEUES @ LINE %d", __LINE__);
1256 			scsipi_channel_freeze(&isp->isp_osinfo.chan[bus], 1);
1257 			if (callout_pending(&isp->isp_osinfo.ldt) == 0) {
1258 				callout_schedule(&isp->isp_osinfo.ldt,
1259 				    isp->isp_osinfo.loop_down_limit * hz);
1260 				isp_prt(isp, ISP_LOGSANCFG|ISP_LOGDEBUG0,
1261 				   "Starting Loop Down Timer");
1262 			}
1263 		}
1264 		isp_prt(isp, ISP_LOGINFO, msg);
1265 		break;
1266 	case ISPASYNC_LOOP_UP:
1267 		/*
1268 		 * Let the subsequent ISPASYNC_CHANGE_NOTIFY invoke
1269 		 * the FC worker thread. When the FC worker thread
1270 		 * is done, let *it* call scsipi_channel_thaw...
1271 		 */
1272 		isp_prt(isp, ISP_LOGINFO, "Loop UP");
1273 		break;
1274 	case ISPASYNC_DEV_ARRIVED:
1275 		va_start(ap, cmd);
1276 		bus = va_arg(ap, int);
1277 		lp = va_arg(ap, fcportdb_t *);
1278 		va_end(ap);
1279 		lp->reserved = 0;
1280 		if ((FCPARAM(isp, bus)->role & ISP_ROLE_INITIATOR) &&
1281 		    (lp->roles & (SVC3_TGT_ROLE >> SVC3_ROLE_SHIFT))) {
1282 			int dbidx = lp - FCPARAM(isp, bus)->portdb;
1283 			int i;
1284 
1285 			for (i = 0; i < MAX_FC_TARG; i++) {
1286 				if (i >= FL_ID && i <= SNS_ID) {
1287 					continue;
1288 				}
1289 				if (FCPARAM(isp, bus)->isp_dev_map[i] == 0) {
1290 					break;
1291 				}
1292 			}
1293 			if (i < MAX_FC_TARG) {
1294 				FCPARAM(isp, bus)->isp_dev_map[i] = dbidx + 1;
1295 				lp->dev_map_idx = i + 1;
1296 			} else {
1297 				isp_prt(isp, ISP_LOGWARN, "out of target ids");
1298 				isp_dump_portdb(isp, bus);
1299 			}
1300 		}
1301 		if (lp->dev_map_idx) {
1302 			tgt = lp->dev_map_idx - 1;
1303 			isp_prt(isp, ISP_LOGCONFIG, prom2,
1304 			    lp->portid, lp->handle,
1305 		            roles[lp->roles], "arrived at", tgt,
1306 		    	    (uint32_t) (lp->node_wwn >> 32),
1307 			    (uint32_t) lp->node_wwn,
1308 		    	    (uint32_t) (lp->port_wwn >> 32),
1309 			    (uint32_t) lp->port_wwn);
1310 			isp_make_here(isp, tgt);
1311 		} else {
1312 			isp_prt(isp, ISP_LOGCONFIG, prom,
1313 			    lp->portid, lp->handle,
1314 		            roles[lp->roles], "arrived",
1315 		    	    (uint32_t) (lp->node_wwn >> 32),
1316 			    (uint32_t) lp->node_wwn,
1317 		    	    (uint32_t) (lp->port_wwn >> 32),
1318 			    (uint32_t) lp->port_wwn);
1319 		}
1320 		break;
1321 	case ISPASYNC_DEV_CHANGED:
1322 		va_start(ap, cmd);
1323 		bus = va_arg(ap, int);
1324 		lp = va_arg(ap, fcportdb_t *);
1325 		va_end(ap);
1326 		if (isp_change_is_bad) {
1327 			lp->state = FC_PORTDB_STATE_NIL;
1328 			if (lp->dev_map_idx) {
1329 				tgt = lp->dev_map_idx - 1;
1330 				FCPARAM(isp, bus)->isp_dev_map[tgt] = 0;
1331 				lp->dev_map_idx = 0;
1332 				isp_prt(isp, ISP_LOGCONFIG, prom3,
1333 				    lp->portid, tgt, "change is bad");
1334 				isp_make_gone(isp, tgt);
1335 			} else {
1336 				isp_prt(isp, ISP_LOGCONFIG, prom,
1337 				    lp->portid, lp->handle,
1338 				    roles[lp->roles],
1339 				    "changed and departed",
1340 				    (uint32_t) (lp->node_wwn >> 32),
1341 				    (uint32_t) lp->node_wwn,
1342 				    (uint32_t) (lp->port_wwn >> 32),
1343 				    (uint32_t) lp->port_wwn);
1344 			}
1345 		} else {
1346 			lp->portid = lp->new_portid;
1347 			lp->roles = lp->new_roles;
1348 			if (lp->dev_map_idx) {
1349 				int t = lp->dev_map_idx - 1;
1350 				FCPARAM(isp, bus)->isp_dev_map[t] =
1351 				    (lp - FCPARAM(isp, bus)->portdb) + 1;
1352 				tgt = lp->dev_map_idx - 1;
1353 				isp_prt(isp, ISP_LOGCONFIG, prom2,
1354 				    lp->portid, lp->handle,
1355 				    roles[lp->roles], "changed at", tgt,
1356 				    (uint32_t) (lp->node_wwn >> 32),
1357 				    (uint32_t) lp->node_wwn,
1358 				    (uint32_t) (lp->port_wwn >> 32),
1359 				    (uint32_t) lp->port_wwn);
1360 			} else {
1361 				isp_prt(isp, ISP_LOGCONFIG, prom,
1362 				    lp->portid, lp->handle,
1363 				    roles[lp->roles], "changed",
1364 				    (uint32_t) (lp->node_wwn >> 32),
1365 				    (uint32_t) lp->node_wwn,
1366 				    (uint32_t) (lp->port_wwn >> 32),
1367 				    (uint32_t) lp->port_wwn);
1368 			}
1369 		}
1370 		break;
1371 	case ISPASYNC_DEV_STAYED:
1372 		va_start(ap, cmd);
1373 		bus = va_arg(ap, int);
1374 		lp = va_arg(ap, fcportdb_t *);
1375 		va_end(ap);
1376 		if (lp->dev_map_idx) {
1377 			tgt = lp->dev_map_idx - 1;
1378 			isp_prt(isp, ISP_LOGCONFIG, prom2,
1379 			    lp->portid, lp->handle,
1380 		    	    roles[lp->roles], "stayed at", tgt,
1381 			    (uint32_t) (lp->node_wwn >> 32),
1382 			    (uint32_t) lp->node_wwn,
1383 		    	    (uint32_t) (lp->port_wwn >> 32),
1384 			    (uint32_t) lp->port_wwn);
1385 		} else {
1386 			isp_prt(isp, ISP_LOGCONFIG, prom,
1387 			    lp->portid, lp->handle,
1388 		    	    roles[lp->roles], "stayed",
1389 			    (uint32_t) (lp->node_wwn >> 32),
1390 			    (uint32_t) lp->node_wwn,
1391 		    	    (uint32_t) (lp->port_wwn >> 32),
1392 			    (uint32_t) lp->port_wwn);
1393 		}
1394 		break;
1395 	case ISPASYNC_DEV_GONE:
1396 		va_start(ap, cmd);
1397 		bus = va_arg(ap, int);
1398 		lp = va_arg(ap, fcportdb_t *);
1399 		va_end(ap);
1400 		/*
1401 		 * If this has a virtual target and we haven't marked it
1402 		 * that we're going to have isp_gdt tell the OS it's gone,
1403 		 * set the isp_gdt timer running on it.
1404 		 *
1405 		 * If it isn't marked that isp_gdt is going to get rid of it,
1406 		 * announce that it's gone.
1407 		 */
1408 		if (lp->dev_map_idx && lp->reserved == 0) {
1409 			lp->reserved = 1;
1410 			lp->new_reserved = isp->isp_osinfo.gone_device_time;
1411 			lp->state = FC_PORTDB_STATE_ZOMBIE;
1412 			if (isp->isp_osinfo.gdt_running == 0) {
1413 				isp->isp_osinfo.gdt_running = 1;
1414 				isp_prt(isp, ISP_LOGSANCFG|ISP_LOGDEBUG0,
1415 				    "starting Gone Device Timer");
1416 				callout_schedule(&isp->isp_osinfo.gdt, hz);
1417 			}
1418 			tgt = lp->dev_map_idx - 1;
1419 			isp_prt(isp, ISP_LOGCONFIG, prom2,
1420 			    lp->portid, lp->handle,
1421 		            roles[lp->roles], "gone zombie at", tgt,
1422 		    	    (uint32_t) (lp->node_wwn >> 32),
1423 			    (uint32_t) lp->node_wwn,
1424 		    	    (uint32_t) (lp->port_wwn >> 32),
1425 			    (uint32_t) lp->port_wwn);
1426 		} else if (lp->reserved == 0) {
1427 			isp_prt(isp, ISP_LOGCONFIG, prom,
1428 			    lp->portid, lp->handle,
1429 			    roles[lp->roles], "departed",
1430 			    (uint32_t) (lp->node_wwn >> 32),
1431 			    (uint32_t) lp->node_wwn,
1432 			    (uint32_t) (lp->port_wwn >> 32),
1433 			    (uint32_t) lp->port_wwn);
1434 		}
1435 		break;
1436 	case ISPASYNC_CHANGE_NOTIFY:
1437 	{
1438 		int opt;
1439 
1440 		va_start(ap, cmd);
1441 		bus = va_arg(ap, int);
1442 		opt = va_arg(ap, int);
1443 		va_end(ap);
1444 
1445 		if (opt == ISPASYNC_CHANGE_PDB) {
1446 			msg = "Port Database Changed";
1447 		} else if (opt == ISPASYNC_CHANGE_SNS) {
1448 			msg = "Name Server Database Changed";
1449 		} else {
1450 			msg = "Other Change Notify";
1451 		}
1452 		/*
1453 		 * If the loop down timer is running, cancel it.
1454 		 */
1455 		if (callout_pending(&isp->isp_osinfo.ldt)) {
1456 			callout_stop(&isp->isp_osinfo.ldt);
1457 			isp_prt(isp, ISP_LOGSANCFG|ISP_LOGDEBUG0,
1458 			   "Stopping Loop Down Timer");
1459 		}
1460 		isp_prt(isp, ISP_LOGINFO, msg);
1461 		/*
1462 		 * We can set blocked here because we know it's now okay
1463 		 * to try and run isp_fc_runstate (in order to build loop
1464 		 * state). But we don't try and freeze the midlayer's queue
1465 		 * if we have no thread that we can wake to later unfreeze
1466 		 * it.
1467 		 */
1468 		if (isp->isp_osinfo.blocked == 0) {
1469 			isp->isp_osinfo.blocked = 1;
1470 			if (isp->isp_osinfo.thread) {
1471 				isp_prt(isp, ISP_LOGSANCFG|ISP_LOGDEBUG0,
1472 				    "FREEZE QUEUES @ LINE %d", __LINE__);
1473 				scsipi_channel_freeze(&isp->isp_osinfo.chan[bus], 1);
1474 			}
1475 		}
1476 		/*
1477 		 * Note that we have work for the thread to do, and
1478 		 * if the thread is here already, wake it up.
1479 		 */
1480 		if (isp->isp_osinfo.thread) {
1481 			wakeup(&isp->isp_osinfo.thread);
1482 		} else {
1483 			isp_prt(isp, ISP_LOGDEBUG1, "no FC thread yet");
1484 		}
1485 		break;
1486 	}
1487 	case ISPASYNC_FW_CRASH:
1488 	{
1489 		uint16_t mbox1;
1490 		mbox1 = ISP_READ(isp, OUTMAILBOX1);
1491 		if (IS_DUALBUS(isp)) {
1492 			bus = ISP_READ(isp, OUTMAILBOX6);
1493 		} else {
1494 			bus = 0;
1495 		}
1496 		isp_prt(isp, ISP_LOGERR,
1497 		    "Internal Firmware Error on bus %d @ RISC Address 0x%x",
1498 		    bus, mbox1);
1499 		if (IS_FC(isp)) {
1500 			if (isp->isp_osinfo.blocked == 0) {
1501 				isp->isp_osinfo.blocked = 1;
1502 				isp_prt(isp, ISP_LOGSANCFG|ISP_LOGDEBUG0,
1503 				    "FREEZE QUEUES @ LINE %d", __LINE__);
1504 				scsipi_channel_freeze(&isp->isp_osinfo.chan[bus], 1);
1505 			}
1506 		}
1507 		mbox1 = isp->isp_osinfo.mbox_sleep_ok;
1508 		isp->isp_osinfo.mbox_sleep_ok = 0;
1509 		isp_reinit(isp, 0);
1510 		isp->isp_osinfo.mbox_sleep_ok = mbox1;
1511 		isp_async(isp, ISPASYNC_FW_RESTARTED, NULL);
1512 		break;
1513 	}
1514 	default:
1515 		break;
1516 	}
1517 }
1518 
1519 void
1520 isp_prt(struct ispsoftc *isp, int level, const char *fmt, ...)
1521 {
1522 	va_list ap;
1523 	if (level != ISP_LOGALL && (level & isp->isp_dblev) == 0) {
1524 		return;
1525 	}
1526 	printf("%s: ", device_xname(isp->isp_osinfo.dev));
1527 	va_start(ap, fmt);
1528 	vprintf(fmt, ap);
1529 	va_end(ap);
1530 	printf("\n");
1531 }
1532 
1533 void
1534 isp_xs_prt(struct ispsoftc *isp, XS_T *xs, int level, const char *fmt, ...)
1535 {
1536 	va_list ap;
1537 	if (level != ISP_LOGALL && (level & isp->isp_dblev) == 0) {
1538 		return;
1539 	}
1540 	scsipi_printaddr(xs->xs_periph);
1541 	va_start(ap, fmt);
1542 	vprintf(fmt, ap);
1543 	va_end(ap);
1544 	printf("\n");
1545 }
1546 
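/*
 * Counted interrupt protection: the outermost isp_lock() raises to splbio
 * and remembers the previous spl, nested calls only bump the count, and the
 * matching outermost isp_unlock() restores the saved level. A sketch of the
 * intended nesting (illustrative only):
 *
 *	isp_lock(isp);		outermost: splbio(), previous spl saved
 *	isp_lock(isp);		nested: count goes to 2, spl untouched
 *	isp_unlock(isp);	count back to 1, still at splbio
 *	isp_unlock(isp);	outermost: saved spl restored
 */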
1547 void
1548 isp_lock(struct ispsoftc *isp)
1549 {
1550 	int s = splbio();
1551 	if (isp->isp_osinfo.islocked++ == 0) {
1552 		isp->isp_osinfo.splsaved = s;
1553 	} else {
1554 		splx(s);
1555 	}
1556 }
1557 
1558 void
1559 isp_unlock(struct ispsoftc *isp)
1560 {
1561 	if (isp->isp_osinfo.islocked-- <= 1) {
1562 		isp->isp_osinfo.islocked = 0;
1563 		splx(isp->isp_osinfo.splsaved);
1564 	}
1565 }
1566 
1567 uint64_t
1568 isp_microtime_sub(struct timeval *b, struct timeval *a)
1569 {
1570 	struct timeval x;
1571 	uint64_t elapsed;
1572 	timersub(b, a, &x);
1573 	elapsed = GET_NANOSEC(&x);
1574 	if (elapsed == 0)
1575 		elapsed++;
1576 	return (elapsed);
1577 }
1578 
1579 int
1580 isp_mbox_acquire(ispsoftc_t *isp)
1581 {
1582 	if (isp->isp_osinfo.mboxbsy) {
1583 		return (1);
1584 	} else {
1585 		isp->isp_osinfo.mboxcmd_done = 0;
1586 		isp->isp_osinfo.mboxbsy = 1;
1587 		return (0);
1588 	}
1589 }
1590 
1591 void
1592 isp_mbox_wait_complete(struct ispsoftc *isp, mbreg_t *mbp)
1593 {
1594 	unsigned int usecs = mbp->timeout;
1595 	unsigned int maxc, olim, ilim;
1596 	struct timeval start;
1597 
1598 	if (usecs == 0) {
1599 		usecs = MBCMD_DEFAULT_TIMEOUT;
1600 	}
1601 	maxc = isp->isp_mbxwrk0 + 1;
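	/*
	 * The total budget is maxc back-to-back timeouts. For illustration
	 * (hypothetical numbers; the real default is MBCMD_DEFAULT_TIMEOUT):
	 * a 250000us per-command timeout with two queued continuation steps
	 * (isp_mbxwrk0 == 2) gives maxc == 3 and an overall wait of up to
	 * roughly 750ms in either the sleeping or the polling path.
	 */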
1602 
1603 	microtime(&start);
1604 	if (isp->isp_osinfo.mbox_sleep_ok) {
1605 		int to;
1606 		struct timeval tv, utv;
1607 
1608 		tv.tv_sec = 0;
1609 		tv.tv_usec = 0;
1610 		for (olim = 0; olim < maxc; olim++) {
1611 			utv.tv_sec = usecs / 1000000;
1612 			utv.tv_usec = usecs % 1000000;
1613 			timeradd(&tv, &utv, &tv);
1614 		}
1615 		to = tvtohz(&tv);
1616 		if (to == 0)
1617 			to = 1;
1618 		timeradd(&tv, &start, &tv);
1619 
1620 		isp->isp_osinfo.mbox_sleep_ok = 0;
1621 		isp->isp_osinfo.mbox_sleeping = 1;
1622 		tsleep(&isp->isp_mbxworkp, PRIBIO, "ispmbx_sleep", to);
1623 		isp->isp_osinfo.mbox_sleeping = 0;
1624 		isp->isp_osinfo.mbox_sleep_ok = 1;
1625 	} else {
1626 		for (olim = 0; olim < maxc; olim++) {
1627 			for (ilim = 0; ilim < usecs; ilim += 100) {
1628 				uint32_t isr;
1629 				uint16_t sema, mbox;
1630 				if (isp->isp_osinfo.mboxcmd_done) {
1631 					break;
1632 				}
1633 				if (ISP_READ_ISR(isp, &isr, &sema, &mbox)) {
1634 					isp_intr(isp, isr, sema, mbox);
1635 					if (isp->isp_osinfo.mboxcmd_done) {
1636 						break;
1637 					}
1638 				}
1639 				ISP_DELAY(100);
1640 			}
1641 			if (isp->isp_osinfo.mboxcmd_done) {
1642 				break;
1643 			}
1644 		}
1645 	}
1646 	if (isp->isp_osinfo.mboxcmd_done == 0) {
1647 		struct timeval finish, elapsed;
1648 
1649 		microtime(&finish);
1650 		timersub(&finish, &start, &elapsed);
1651 		isp_prt(isp, ISP_LOGWARN,
1652 		    "%s Mailbox Command (0x%x) Timeout (%uus actual)",
1653 		    isp->isp_osinfo.mbox_sleep_ok? "Interrupting" : "Polled",
1654 		    isp->isp_lastmbxcmd, (elapsed.tv_sec * 1000000) +
1655 		    elapsed.tv_usec);
1656 		mbp->param[0] = MBOX_TIMEOUT;
1657 		isp->isp_osinfo.mboxcmd_done = 1;
1658 	}
1659 }
1660 
1661 void
1662 isp_mbox_notify_done(ispsoftc_t *isp)
1663 {
1664 	if (isp->isp_osinfo.mbox_sleeping) {
1665 		wakeup(&isp->isp_mbxworkp);
1666 	}
1667 	isp->isp_osinfo.mboxcmd_done = 1;
1668 }
1669 
1670 void
1671 isp_mbox_release(ispsoftc_t *isp)
1672 {
1673 	isp->isp_osinfo.mboxbsy = 0;
1674 }
1675