1 /* $NetBSD: isp_netbsd.c,v 1.57 2002/09/01 22:30:09 mjacob Exp $ */
2 /*
3  * This driver, which is contained in NetBSD in the files:
4  *
5  *	sys/dev/ic/isp.c
6  *	sys/dev/ic/isp_inline.h
7  *	sys/dev/ic/isp_netbsd.c
8  *	sys/dev/ic/isp_netbsd.h
9  *	sys/dev/ic/isp_target.c
10  *	sys/dev/ic/isp_target.h
11  *	sys/dev/ic/isp_tpublic.h
12  *	sys/dev/ic/ispmbox.h
13  *	sys/dev/ic/ispreg.h
14  *	sys/dev/ic/ispvar.h
15  *	sys/microcode/isp/asm_sbus.h
16  *	sys/microcode/isp/asm_1040.h
17  *	sys/microcode/isp/asm_1080.h
18  *	sys/microcode/isp/asm_12160.h
19  *	sys/microcode/isp/asm_2100.h
20  *	sys/microcode/isp/asm_2200.h
21  *	sys/pci/isp_pci.c
22  *	sys/sbus/isp_sbus.c
23  *
24  * is being actively maintained by Matthew Jacob (mjacob@netbsd.org).
25  * This driver also is shared source with FreeBSD, OpenBSD, Linux, and
26  * Solaris versions. This tends to be an interesting maintenance problem.
27  *
28  * Please coordinate with Matthew Jacob on changes you wish to make here.
29  */
30 /*
31  * Platform (NetBSD) dependent common attachment code for Qlogic adapters.
32  * Matthew Jacob <mjacob@nas.nasa.gov>
33  */
34 /*
35  * Copyright (C) 1997, 1998, 1999 National Aeronautics & Space Administration
36  * All rights reserved.
37  *
38  * Redistribution and use in source and binary forms, with or without
39  * modification, are permitted provided that the following conditions
40  * are met:
41  * 1. Redistributions of source code must retain the above copyright
42  *    notice, this list of conditions and the following disclaimer.
43  * 2. Redistributions in binary form must reproduce the above copyright
44  *    notice, this list of conditions and the following disclaimer in the
45  *    documentation and/or other materials provided with the distribution.
46  * 3. The name of the author may not be used to endorse or promote products
47  *    derived from this software without specific prior written permission
48  *
49  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
50  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
51  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
52  * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
53  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
54  * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
55  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
56  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
57  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
58  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
59  */
60 
61 #include <sys/cdefs.h>
62 __KERNEL_RCSID(0, "$NetBSD: isp_netbsd.c,v 1.57 2002/09/01 22:30:09 mjacob Exp $");
63 
64 #include <dev/ic/isp_netbsd.h>
65 #include <sys/scsiio.h>
66 
67 
68 /*
69  * Set a timeout for the watchdogging of a command.
70  *
71  * The dimensional analysis is
72  *
73  *	milliseconds * (seconds/millisecond) * (ticks/second) = ticks
74  *
75  *			=
76  *
77  *	(milliseconds / 1000) * hz = ticks
78  *
79  *
80  * For timeouts less than 1 second, we'll get zero. Because of this, and
81  * because we want to establish *our* timeout to be longer than what the
82  * firmware might do, we just add 3 seconds at the back end.
83  */
84 #define	_XT(xs)	((((xs)->timeout/1000) * hz) + (3 * hz))
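/*
 * A worked example (the numbers are illustrative, not taken from this file):
 * with xs->timeout == 10000 ms and hz == 100, _XT(xs) is
 * (10000 / 1000) * 100 + 3 * 100 == 1300 ticks, i.e. a 13 second watchdog
 * for a 10 second command timeout.
 */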
85 
86 static void isp_config_interrupts(struct device *);
87 static void ispminphys_1020(struct buf *);
88 static void ispminphys(struct buf *);
89 static INLINE void ispcmd(struct ispsoftc *, XS_T *);
90 static void isprequest(struct scsipi_channel *, scsipi_adapter_req_t, void *);
91 static int
92 ispioctl(struct scsipi_channel *, u_long, caddr_t, int, struct proc *);
93 
94 static void isp_polled_cmd(struct ispsoftc *, XS_T *);
95 static void isp_dog(void *);
96 static void isp_create_fc_worker(void *);
97 static void isp_fc_worker(void *);
98 
99 /*
100  * Complete attachment of hardware, including subdevices.
101  */
102 void
103 isp_attach(struct ispsoftc *isp)
104 {
105 	isp->isp_state = ISP_RUNSTATE;
106 
107 	isp->isp_osinfo._adapter.adapt_dev = &isp->isp_osinfo._dev;
108 	isp->isp_osinfo._adapter.adapt_nchannels = IS_DUALBUS(isp) ? 2 : 1;
109 	isp->isp_osinfo._adapter.adapt_openings = isp->isp_maxcmds;
110 	/*
111 	 * It's not stated whether max_periph is limited by SPI
112 	 * tag usage, but let's assume that it is.
113 	 */
114 	isp->isp_osinfo._adapter.adapt_max_periph = min(isp->isp_maxcmds, 255);
115 	isp->isp_osinfo._adapter.adapt_ioctl = ispioctl;
116 	isp->isp_osinfo._adapter.adapt_request = isprequest;
117 	if (isp->isp_type <= ISP_HA_SCSI_1020A) {
118 		isp->isp_osinfo._adapter.adapt_minphys = ispminphys_1020;
119 	} else {
120 		isp->isp_osinfo._adapter.adapt_minphys = ispminphys;
121 	}
122 
123 	isp->isp_osinfo._chan.chan_adapter = &isp->isp_osinfo._adapter;
124 	isp->isp_osinfo._chan.chan_bustype = &scsi_bustype;
125 	isp->isp_osinfo._chan.chan_channel = 0;
126 
127 	/*
128 	 * Until the midlayer is fixed to use REPORT LUNS, limit to 8 luns.
129 	 */
130 	isp->isp_osinfo._chan.chan_nluns = min(isp->isp_maxluns, 8);
131 
132 	if (IS_FC(isp)) {
133 		isp->isp_osinfo._chan.chan_ntargets = MAX_FC_TARG;
134 		isp->isp_osinfo._chan.chan_id = MAX_FC_TARG;
135 		isp->isp_osinfo.threadwork = 1;
136 		/*
137 		 * Note that isp_create_fc_worker won't get called
138 		 * until much much later (after proc0 is created).
139 		 */
140 		kthread_create(isp_create_fc_worker, isp);
141 #ifdef	ISP_FW_CRASH_DUMP
142 		if (IS_2200(isp)) {
143 			FCPARAM(isp)->isp_dump_data =
144 			    malloc(QLA2200_RISC_IMAGE_DUMP_SIZE, M_DEVBUF,
145 				M_NOWAIT);
146 		} else if (IS_23XX(isp)) {
147 			FCPARAM(isp)->isp_dump_data =
148 			    malloc(QLA2300_RISC_IMAGE_DUMP_SIZE, M_DEVBUF,
149 				M_NOWAIT);
150 		}
151 		if (FCPARAM(isp)->isp_dump_data)
152 			FCPARAM(isp)->isp_dump_data[0] = 0;
153 #endif
154 	} else {
155 		int bus = 0;
156 		sdparam *sdp = isp->isp_param;
157 
158 		isp->isp_osinfo._chan.chan_ntargets = MAX_TARGETS;
159 		isp->isp_osinfo._chan.chan_id = sdp->isp_initiator_id;
160 		isp->isp_osinfo.discovered[0] = 1 << sdp->isp_initiator_id;
161 		if (IS_DUALBUS(isp)) {
162 			isp->isp_osinfo._chan_b = isp->isp_osinfo._chan;
163 			sdp++;
164 			isp->isp_osinfo.discovered[1] =
165 			    1 << sdp->isp_initiator_id;
166 			isp->isp_osinfo._chan_b.chan_id = sdp->isp_initiator_id;
167 			isp->isp_osinfo._chan_b.chan_channel = 1;
168 		}
169 		ISP_LOCK(isp);
170 		(void) isp_control(isp, ISPCTL_RESET_BUS, &bus);
171 		if (IS_DUALBUS(isp)) {
172 			bus++;
173 			(void) isp_control(isp, ISPCTL_RESET_BUS, &bus);
174 		}
175 		ISP_UNLOCK(isp);
176 	}
177 
178 
179 	/*
180 	 * Defer enabling mailbox interrupts until later.
181 	 */
182 	config_interrupts((struct device *) isp, isp_config_interrupts);
183 
184 	/*
185 	 * And attach children (if any).
186 	 */
187 	config_found((void *)isp, &isp->isp_chanA, scsiprint);
188 	if (IS_DUALBUS(isp)) {
189 		config_found((void *)isp, &isp->isp_chanB, scsiprint);
190 	}
191 }
192 
193 
194 static void
195 isp_config_interrupts(struct device *self)
196 {
197 	struct ispsoftc *isp = (struct ispsoftc *) self;
198 
199 	/*
200 	 * After this point, we'll be doing the new configuration
201 	 * schema which allows interrupts, so we can do tsleep/wakeup
202 	 * for mailbox stuff at that point, if that's allowed.
203 	 */
204 	if (IS_FC(isp)) {
205 		isp->isp_osinfo.no_mbox_ints = 0;
206 	}
207 }
208 
209 
210 /*
211  * minphys our xfers
212  */
213 
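/*
 * The clamps below are inferred from the constants used here: the
 * 1020-class adapters are limited to 16MB (1 << 24) transfers, while
 * later adapters are simply capped at 1GB (1 << 30); both then defer to
 * the generic minphys().
 */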
214 static void
215 ispminphys_1020(struct buf *bp)
216 {
217 	if (bp->b_bcount >= (1 << 24)) {
218 		bp->b_bcount = (1 << 24);
219 	}
220 	minphys(bp);
221 }
222 
223 static void
224 ispminphys(struct buf *bp)
225 {
226 	if (bp->b_bcount >= (1 << 30)) {
227 		bp->b_bcount = (1 << 30);
228 	}
229 	minphys(bp);
230 }
231 
232 static int
233 ispioctl(struct scsipi_channel *chan, u_long cmd, caddr_t addr, int flag,
234 	struct proc *p)
235 {
236 	struct ispsoftc *isp = (void *)chan->chan_adapter->adapt_dev;
237 	int retval = ENOTTY;
238 
239 	switch (cmd) {
240 #ifdef	ISP_FW_CRASH_DUMP
241 	case ISP_GET_FW_CRASH_DUMP:
242 	{
243 		u_int16_t *ptr = FCPARAM(isp)->isp_dump_data;
244 		size_t sz;
245 
246 		retval = 0;
247 		if (IS_2200(isp))
248 			sz = QLA2200_RISC_IMAGE_DUMP_SIZE;
249 		else
250 			sz = QLA2300_RISC_IMAGE_DUMP_SIZE;
251 		ISP_LOCK(isp);
252 		if (ptr && *ptr) {
253 			void *uaddr = *((void **) addr);
254 			if (copyout(ptr, uaddr, sz)) {
255 				retval = EFAULT;
256 			} else {
257 				*ptr = 0;
258 			}
259 		} else {
260 			retval = ENXIO;
261 		}
262 		ISP_UNLOCK(isp);
263 		break;
264 	}
265 
266 	case ISP_FORCE_CRASH_DUMP:
267 		ISP_LOCK(isp);
268 		if (isp->isp_osinfo.blocked == 0) {
269 			isp->isp_osinfo.blocked = 1;
270 			scsipi_channel_freeze(&isp->isp_chanA, 1);
271 		}
272 		isp_fw_dump(isp);
273 		isp_reinit(isp);
274 		ISP_UNLOCK(isp);
275 		retval = 0;
276 		break;
277 #endif
278 	case ISP_SDBLEV:
279 	{
280 		int olddblev = isp->isp_dblev;
281 		isp->isp_dblev = *(int *)addr;
282 		*(int *)addr = olddblev;
283 		retval = 0;
284 		break;
285 	}
286 	case ISP_RESETHBA:
287 		ISP_LOCK(isp);
288 		isp_reinit(isp);
289 		ISP_UNLOCK(isp);
290 		retval = 0;
291 		break;
292 	case ISP_RESCAN:
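		/*
		 * A rescan forces a re-evaluation of FC loop and fabric
		 * state; it fails with EIO if the loop cannot be brought
		 * to a ready state within roughly five seconds.
		 */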
293 		if (IS_FC(isp)) {
294 			ISP_LOCK(isp);
295 			if (isp_fc_runstate(isp, 5 * 1000000)) {
296 				retval = EIO;
297 			} else {
298 				retval = 0;
299 			}
300 			ISP_UNLOCK(isp);
301 		}
302 		break;
303 	case ISP_FC_LIP:
304 		if (IS_FC(isp)) {
305 			ISP_LOCK(isp);
306 			if (isp_control(isp, ISPCTL_SEND_LIP, 0)) {
307 				retval = EIO;
308 			} else {
309 				retval = 0;
310 			}
311 			ISP_UNLOCK(isp);
312 		}
313 		break;
314 	case ISP_FC_GETDINFO:
315 	{
316 		struct isp_fc_device *ifc = (struct isp_fc_device *) addr;
317 		struct lportdb *lp;
318 
319 		if (ifc->loopid < 0 || ifc->loopid >= MAX_FC_TARG) {
320 			retval = EINVAL;
321 			break;
322 		}
323 		ISP_LOCK(isp);
324 		lp = &FCPARAM(isp)->portdb[ifc->loopid];
325 		if (lp->valid) {
326 			ifc->loopid = lp->loopid;
327 			ifc->portid = lp->portid;
328 			ifc->node_wwn = lp->node_wwn;
329 			ifc->port_wwn = lp->port_wwn;
330 			retval = 0;
331 		} else {
332 			retval = ENODEV;
333 		}
334 		ISP_UNLOCK(isp);
335 		break;
336 	}
337 	case ISP_GET_STATS:
338 	{
339 		isp_stats_t *sp = (isp_stats_t *) addr;
340 
341 		MEMZERO(sp, sizeof (*sp));
342 		sp->isp_stat_version = ISP_STATS_VERSION;
343 		sp->isp_type = isp->isp_type;
344 		sp->isp_revision = isp->isp_revision;
345 		ISP_LOCK(isp);
346 		sp->isp_stats[ISP_INTCNT] = isp->isp_intcnt;
347 		sp->isp_stats[ISP_INTBOGUS] = isp->isp_intbogus;
348 		sp->isp_stats[ISP_INTMBOXC] = isp->isp_intmboxc;
349 		sp->isp_stats[ISP_INGOASYNC] = isp->isp_intoasync;
350 		sp->isp_stats[ISP_RSLTCCMPLT] = isp->isp_rsltccmplt;
351 		sp->isp_stats[ISP_FPHCCMCPLT] = isp->isp_fphccmplt;
352 		sp->isp_stats[ISP_RSCCHIWAT] = isp->isp_rscchiwater;
353 		sp->isp_stats[ISP_FPCCHIWAT] = isp->isp_fpcchiwater;
354 		ISP_UNLOCK(isp);
355 		retval = 0;
356 		break;
357 	}
358 	case ISP_CLR_STATS:
359 		ISP_LOCK(isp);
360 		isp->isp_intcnt = 0;
361 		isp->isp_intbogus = 0;
362 		isp->isp_intmboxc = 0;
363 		isp->isp_intoasync = 0;
364 		isp->isp_rsltccmplt = 0;
365 		isp->isp_fphccmplt = 0;
366 		isp->isp_rscchiwater = 0;
367 		isp->isp_fpcchiwater = 0;
368 		ISP_UNLOCK(isp);
369 		retval = 0;
370 		break;
371 	case ISP_FC_GETHINFO:
372 	{
373 		struct isp_hba_device *hba = (struct isp_hba_device *) addr;
374 		MEMZERO(hba, sizeof (*hba));
375 		ISP_LOCK(isp);
376 		hba->fc_speed = FCPARAM(isp)->isp_gbspeed;
377 		hba->fc_scsi_supported = 1;
378 		hba->fc_topology = FCPARAM(isp)->isp_topo + 1;
379 		hba->fc_loopid = FCPARAM(isp)->isp_loopid;
380 		hba->active_node_wwn = FCPARAM(isp)->isp_nodewwn;
381 		hba->active_port_wwn = FCPARAM(isp)->isp_portwwn;
382 		ISP_UNLOCK(isp);
		retval = 0;
383 		break;
384 	}
385 	case SCBUSIORESET:
386 		ISP_LOCK(isp);
387 		if (isp_control(isp, ISPCTL_RESET_BUS, &chan->chan_channel))
388 			retval = EIO;
389 		else
390 			retval = 0;
391 		ISP_UNLOCK(isp);
392 		break;
393 	default:
394 		break;
395 	}
396 	return (retval);
397 }
398 
399 static INLINE void
400 ispcmd(struct ispsoftc *isp, XS_T *xs)
401 {
402 	ISP_LOCK(isp);
403 	if (isp->isp_state < ISP_RUNSTATE) {
404 		DISABLE_INTS(isp);
405 		isp_init(isp);
406 		if (isp->isp_state != ISP_INITSTATE) {
407 			ENABLE_INTS(isp);
408 			ISP_UNLOCK(isp);
409 			isp_prt(isp, ISP_LOGERR, "isp not at init state");
410 			XS_SETERR(xs, HBA_BOTCH);
411 			scsipi_done(xs);
412 			return;
413 		}
414 		isp->isp_state = ISP_RUNSTATE;
415 		ENABLE_INTS(isp);
416 	}
417 	/*
418 	 * Handle the case of an FC card where the FC thread hasn't
419 	 * fired up yet and we have loop state to clean up. If we
420 	 * can't clear things up and we've never seen loop up, bounce
421 	 * the command.
422 	 */
423 	if (IS_FC(isp) && isp->isp_osinfo.threadwork &&
424 	    isp->isp_osinfo.thread == 0) {
425 		volatile u_int8_t ombi = isp->isp_osinfo.no_mbox_ints;
426 		int delay_time;
427 
428 		if (xs->xs_control & XS_CTL_POLL) {
429 			isp->isp_osinfo.no_mbox_ints = 1;
430 		}
431 
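		/*
		 * The first time through we give the loop a generous ten
		 * seconds to settle; subsequent attempts wait only 250ms,
		 * presumably so a dead loop does not stall every command.
		 */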
432 		if (isp->isp_osinfo.loop_checked == 0) {
433 			delay_time = 10 * 1000000;
434 			isp->isp_osinfo.loop_checked = 1;
435 		} else {
436 			delay_time = 250000;
437 		}
438 
439 		if (isp_fc_runstate(isp, delay_time) != 0) {
440 			if (xs->xs_control & XS_CTL_POLL) {
441 				isp->isp_osinfo.no_mbox_ints = ombi;
442 			}
443 			if (FCPARAM(isp)->loop_seen_once == 0) {
444 				XS_SETERR(xs, HBA_SELTIMEOUT);
445 				scsipi_done(xs);
446 				ISP_UNLOCK(isp);
447 				return;
448 			}
449 			/*
450 			 * Otherwise, fall thru to be queued up for later.
451 			 */
452 		} else {
453 			int wasblocked =
454 			    (isp->isp_osinfo.blocked || isp->isp_osinfo.paused);
455 			isp->isp_osinfo.threadwork = 0;
456 			isp->isp_osinfo.blocked =
457 			    isp->isp_osinfo.paused = 0;
458 			if (wasblocked) {
459 				scsipi_channel_thaw(&isp->isp_chanA, 1);
460 			}
461 		}
462 		if (xs->xs_control & XS_CTL_POLL) {
463 			isp->isp_osinfo.no_mbox_ints = ombi;
464 		}
465 	}
466 
467 	if (isp->isp_osinfo.paused) {
468 		isp_prt(isp, ISP_LOGWARN, "I/O while paused");
469 		xs->error = XS_RESOURCE_SHORTAGE;
470 		scsipi_done(xs);
471 		ISP_UNLOCK(isp);
472 		return;
473 	}
474 	if (isp->isp_osinfo.blocked) {
475 		isp_prt(isp, ISP_LOGWARN, "I/O while blocked");
476 		xs->error = XS_REQUEUE;
477 		scsipi_done(xs);
478 		ISP_UNLOCK(isp);
479 		return;
480 	}
481 
482 	if (xs->xs_control & XS_CTL_POLL) {
483 		volatile u_int8_t ombi = isp->isp_osinfo.no_mbox_ints;
484 		isp->isp_osinfo.no_mbox_ints = 1;
485 		isp_polled_cmd(isp, xs);
486 		isp->isp_osinfo.no_mbox_ints = ombi;
487 		ISP_UNLOCK(isp);
488 		return;
489 	}
490 
491 	switch (isp_start(xs)) {
492 	case CMD_QUEUED:
493 		if (xs->timeout) {
494 			callout_reset(&xs->xs_callout, _XT(xs), isp_dog, xs);
495 		}
496 		break;
497 	case CMD_EAGAIN:
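		/*
		 * Out of queue or handle resources: freeze the channel(s)
		 * and fail this command with a resource shortage.  isp_done
		 * later clears the pause and thaws the channels once another
		 * command completes.
		 */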
498 		isp->isp_osinfo.paused = 1;
499 		xs->error = XS_RESOURCE_SHORTAGE;
500 		scsipi_channel_freeze(&isp->isp_chanA, 1);
501 		if (IS_DUALBUS(isp)) {
502 			scsipi_channel_freeze(&isp->isp_chanB, 1);
503 		}
504 		scsipi_done(xs);
505 		break;
506 	case CMD_RQLATER:
507 		/*
508 		 * We can only get RQLATER from FC devices (1 channel only)
509 		 *
510 		 * Also, if we've never seen loop up, bounce the command
511 		 * (somebody has booted with no FC cable connected)
512 		 */
513 		if (FCPARAM(isp)->loop_seen_once == 0) {
514 			XS_SETERR(xs, HBA_SELTIMEOUT);
515 			scsipi_done(xs);
516 			break;
517 		}
518 		if (isp->isp_osinfo.blocked == 0) {
519 			isp->isp_osinfo.blocked = 1;
520 			scsipi_channel_freeze(&isp->isp_chanA, 1);
521 		}
522 		xs->error = XS_REQUEUE;
523 		scsipi_done(xs);
524 		break;
525 	case CMD_COMPLETE:
526 		scsipi_done(xs);
527 		break;
528 	}
529 	ISP_UNLOCK(isp);
530 }
531 
532 static void
533 isprequest(struct scsipi_channel *chan, scsipi_adapter_req_t req, void *arg)
534 {
535 	struct ispsoftc *isp = (void *)chan->chan_adapter->adapt_dev;
536 
537 	switch (req) {
538 	case ADAPTER_REQ_RUN_XFER:
539 		ispcmd(isp, (XS_T *) arg);
540 		break;
541 
542 	case ADAPTER_REQ_GROW_RESOURCES:
543 		/* Not supported. */
544 		break;
545 
546 	case ADAPTER_REQ_SET_XFER_MODE:
547 	if (IS_SCSI(isp)) {
548 		struct scsipi_xfer_mode *xm = arg;
549 		int dflags = 0;
550 		sdparam *sdp = SDPARAM(isp);
551 
552 		sdp += chan->chan_channel;
553 		if (xm->xm_mode & PERIPH_CAP_TQING)
554 			dflags |= DPARM_TQING;
555 		if (xm->xm_mode & PERIPH_CAP_WIDE16)
556 			dflags |= DPARM_WIDE;
557 		if (xm->xm_mode & PERIPH_CAP_SYNC)
558 			dflags |= DPARM_SYNC;
559 		ISP_LOCK(isp);
560 		sdp->isp_devparam[xm->xm_target].goal_flags |= dflags;
561 		dflags = sdp->isp_devparam[xm->xm_target].goal_flags;
562 		sdp->isp_devparam[xm->xm_target].dev_update = 1;
563 		isp->isp_update |= (1 << chan->chan_channel);
564 		ISP_UNLOCK(isp);
565 		isp_prt(isp, ISP_LOGDEBUG1,
566 		    "isprequest: device flags 0x%x for %d.%d.X",
567 		    dflags, chan->chan_channel, xm->xm_target);
568 		break;
569 	}
570 	default:
571 		break;
572 	}
573 }
574 
575 static void
576 isp_polled_cmd(struct ispsoftc *isp, XS_T *xs)
577 {
578 	int result;
579 	int infinite = 0, mswait;
580 
581 	result = isp_start(xs);
582 
583 	switch (result) {
584 	case CMD_QUEUED:
585 		break;
586 	case CMD_RQLATER:
587 		if (XS_NOERR(xs)) {
588 			xs->error = XS_REQUEUE;
589 		}
590 	case CMD_EAGAIN:
591 		if (XS_NOERR(xs)) {
592 			xs->error = XS_RESOURCE_SHORTAGE;
593 		}
594 		/* FALLTHROUGH */
595 	case CMD_COMPLETE:
596 		scsipi_done(xs);
597 		return;
598 
599 	}
600 
601 	/*
602 	 * If we can't use interrupts, poll on completion.
603 	 */
604 	if ((mswait = XS_TIME(xs)) == 0)
605 		infinite = 1;
606 
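	/*
	 * Poll in 1ms steps, handing any interrupt status to isp_intr,
	 * until the command is marked done or the timeout expires.
	 */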
607 	while (mswait || infinite) {
608 		u_int16_t isr, sema, mbox;
609 		if (ISP_READ_ISR(isp, &isr, &sema, &mbox)) {
610 			isp_intr(isp, isr, sema, mbox);
611 			if (XS_CMD_DONE_P(xs)) {
612 				break;
613 			}
614 		}
615 		USEC_DELAY(1000);
616 		mswait -= 1;
617 	}
618 
619 	/*
620 	 * If no other error occurred but we didn't finish,
621 	 * something bad happened.
622 	 */
623 	if (XS_CMD_DONE_P(xs) == 0) {
624 		if (isp_control(isp, ISPCTL_ABORT_CMD, xs)) {
625 			isp_reinit(isp);
626 		}
627 		if (XS_NOERR(xs)) {
628 			isp_prt(isp, ISP_LOGERR, "polled command timed out");
629 			XS_SETERR(xs, HBA_BOTCH);
630 		}
631 	}
632 	scsipi_done(xs);
633 }
634 
635 void
636 isp_done(XS_T *xs)
637 {
638 	XS_CMD_S_DONE(xs);
639 	if (XS_CMD_WDOG_P(xs) == 0) {
640 		struct ispsoftc *isp = XS_ISP(xs);
641 		callout_stop(&xs->xs_callout);
642 		if (XS_CMD_GRACE_P(xs)) {
643 			isp_prt(isp, ISP_LOGDEBUG1,
644 			    "finished command on borrowed time");
645 		}
646 		XS_CMD_S_CLEAR(xs);
647 		/*
648 		 * Fixup: if we get a QFULL, we need
649 		 * to set XS_BUSY as the error.
650 		 */
651 		if (xs->status == SCSI_QUEUE_FULL) {
652 			xs->error = XS_BUSY;
653 		}
654 		if (isp->isp_osinfo.paused) {
655 			isp->isp_osinfo.paused = 0;
656 			scsipi_channel_timed_thaw(&isp->isp_chanA);
657 			if (IS_DUALBUS(isp)) {
658 				scsipi_channel_timed_thaw(&isp->isp_chanB);
659 			}
660 		}
661 		if (xs->error == XS_DRIVER_STUFFUP) {
662 			isp_prt(isp, ISP_LOGERR, "BOTCHED cmd for %d.%d.%d cmd 0x%x datalen %ld",
663 			    XS_CHANNEL(xs), XS_TGT(xs), XS_LUN(xs), XS_CDBP(xs)[0], (long) XS_XFRLEN(xs));
664 		}
665 		scsipi_done(xs);
666 	}
667 }
668 
669 static void
670 isp_dog(void *arg)
671 {
672 	XS_T *xs = arg;
673 	struct ispsoftc *isp = XS_ISP(xs);
674 	u_int16_t handle;
675 
676 	ISP_ILOCK(isp);
677 	/*
678 	 * We've decided this command is dead. Make sure we're not trying
679 	 * to kill a command that's already dead by getting its handle and
680 	 * seeing whether it's still alive.
681 	 */
682 	handle = isp_find_handle(isp, xs);
683 	if (handle) {
684 		u_int16_t isr, mbox, sema;
685 
686 		if (XS_CMD_DONE_P(xs)) {
687 			isp_prt(isp, ISP_LOGDEBUG1,
688 			    "watchdog found done cmd (handle 0x%x)", handle);
689 			ISP_IUNLOCK(isp);
690 			return;
691 		}
692 
693 		if (XS_CMD_WDOG_P(xs)) {
694 			isp_prt(isp, ISP_LOGDEBUG1,
695 			    "recursive watchdog (handle 0x%x)", handle);
696 			ISP_IUNLOCK(isp);
697 			return;
698 		}
699 
700 		XS_CMD_S_WDOG(xs);
701 
702 		if (ISP_READ_ISR(isp, &isr, &sema, &mbox)) {
703 			isp_intr(isp, isr, sema, mbox);
704 
705 		}
706 		if (XS_CMD_DONE_P(xs)) {
707 			isp_prt(isp, ISP_LOGDEBUG1,
708 			    "watchdog cleanup for handle 0x%x", handle);
709 			XS_CMD_C_WDOG(xs);
710 			isp_done(xs);
711 		} else if (XS_CMD_GRACE_P(xs)) {
712 			isp_prt(isp, ISP_LOGDEBUG1,
713 			    "watchdog timeout for handle 0x%x", handle);
714 			/*
715 			 * Make sure the command is *really* dead before we
716 			 * release the handle (and DMA resources) for reuse.
717 			 */
718 			(void) isp_control(isp, ISPCTL_ABORT_CMD, arg);
719 
720 			/*
721 			 * After this point, the command is really dead.
722 			 */
723 			if (XS_XFRLEN(xs)) {
724 				ISP_DMAFREE(isp, xs, handle);
725 			}
726 			isp_destroy_handle(isp, handle);
727 			XS_SETERR(xs, XS_TIMEOUT);
728 			XS_CMD_S_CLEAR(xs);
729 			isp_done(xs);
730 		} else {
731 			u_int16_t nxti, optr;
732 			ispreq_t local, *mp = &local, *qe;
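			/*
			 * Put the command on borrowed time: requeue the
			 * watchdog for one second and push a SYNC_ALL marker
			 * through the request queue so the firmware flushes
			 * any completions it may be holding.  If the command
			 * is still outstanding on the next watchdog pass,
			 * the XS_CMD_GRACE_P case above aborts it.
			 */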
733 			isp_prt(isp, ISP_LOGDEBUG2,
734 			    "possible command timeout on handle %x", handle);
735 			XS_CMD_C_WDOG(xs);
736 			callout_reset(&xs->xs_callout, hz, isp_dog, xs);
737 			if (isp_getrqentry(isp, &nxti, &optr, (void **) &qe)) {
738 				ISP_UNLOCK(isp);
739 				return;
740 			}
741 			XS_CMD_S_GRACE(xs);
742 			MEMZERO((void *) mp, sizeof (*mp));
743 			mp->req_header.rqs_entry_count = 1;
744 			mp->req_header.rqs_entry_type = RQSTYPE_MARKER;
745 			mp->req_modifier = SYNC_ALL;
746 			mp->req_target = XS_CHANNEL(xs) << 7;
747 			isp_put_request(isp, mp, qe);
748 			ISP_ADD_REQUEST(isp, nxti);
749 		}
750 	} else {
751 		isp_prt(isp, ISP_LOGDEBUG0, "watchdog with no command");
752 	}
753 	ISP_IUNLOCK(isp);
754 }
755 
756 /*
757  * Fibre Channel state cleanup thread
758  */
759 static void
760 isp_create_fc_worker(void *arg)
761 {
762 	struct ispsoftc *isp = arg;
763 
764 	if (kthread_create1(isp_fc_worker, isp, &isp->isp_osinfo.thread,
765 	    "%s:fc_thrd", isp->isp_name)) {
766 		isp_prt(isp, ISP_LOGERR, "unable to create FC worker thread");
767 		panic("isp_create_fc_worker");
768 	}
769 
770 }
771 
772 static void
773 isp_fc_worker(void *arg)
774 {
775 	void scsipi_run_queue(struct scsipi_channel *);
776 	struct ispsoftc *isp = arg;
777 
778 	for (;;) {
779 		int s;
780 
781 		/*
782 		 * Note we do *not* use the ISP_LOCK/ISP_UNLOCK macros here.
783 		 */
784 		s = splbio();
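		/*
		 * While work is flagged, try to drive the loop/fabric to a
		 * ready state, retrying every half second; once it is ready
		 * (or clearly hopeless), thaw any frozen channel and sleep
		 * until more work arrives.
		 */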
785 		while (isp->isp_osinfo.threadwork) {
786 			isp->isp_osinfo.threadwork = 0;
787 			if (isp_fc_runstate(isp, 10 * 1000000) == 0) {
788 				break;
789 			}
790 			if  (isp->isp_osinfo.loop_checked &&
791 			     FCPARAM(isp)->loop_seen_once == 0) {
792 				splx(s);
793 				goto skip;
794 			}
795 			isp->isp_osinfo.threadwork = 1;
796 			splx(s);
797 			delay(500 * 1000);
798 			s = splbio();
799 		}
800 		if (FCPARAM(isp)->isp_fwstate != FW_READY ||
801 		    FCPARAM(isp)->isp_loopstate != LOOP_READY) {
802 			isp_prt(isp, ISP_LOGINFO, "isp_fc_runstate in vain");
803 			isp->isp_osinfo.threadwork = 1;
804 			splx(s);
805 			continue;
806 		}
807 
808 		if (isp->isp_osinfo.blocked) {
809 			isp->isp_osinfo.blocked = 0;
810 			isp_prt(isp, ISP_LOGDEBUG0,
811 			    "restarting queues (freeze count %d)",
812 			    isp->isp_chanA.chan_qfreeze);
813 			scsipi_channel_thaw(&isp->isp_chanA, 1);
814 		}
815 
816 		if (isp->isp_osinfo.thread == NULL)
817 			break;
818 
819 skip:
820 		(void) tsleep(&isp->isp_osinfo.thread, PRIBIO, "fcclnup", 0);
821 
822 		splx(s);
823 	}
824 
825 	/* In case parent is waiting for us to exit. */
826 	wakeup(&isp->isp_osinfo.thread);
827 
828 	kthread_exit(0);
829 }
830 
831 /*
832  * Free any associated resources prior to decommissioning and
833  * set the card to a known state (so it doesn't wake up and kick
834  * us when we aren't expecting it to).
835  *
836  * Locks are held before coming here.
837  */
838 void
839 isp_uninit(struct ispsoftc *isp)
840 {
841 	isp_lock(isp);
842 	/*
843 	 * Leave with interrupts disabled.
844 	 */
845 	DISABLE_INTS(isp);
846 	isp_unlock(isp);
847 }
848 
849 int
850 isp_async(struct ispsoftc *isp, ispasync_t cmd, void *arg)
851 {
852 	int bus, tgt;
853 
854 	switch (cmd) {
855 	case ISPASYNC_NEW_TGT_PARAMS:
856 	if (IS_SCSI(isp) && isp->isp_dblev) {
857 		sdparam *sdp = isp->isp_param;
858 		int flags;
859 		struct scsipi_xfer_mode xm;
860 
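		/*
		 * The argument packs the bus number in the upper 16 bits and
		 * the target in the lower 16; translate the active firmware
		 * parameters into a scsipi xfer-mode event for the midlayer.
		 */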
861 		tgt = *((int *) arg);
862 		bus = (tgt >> 16) & 0xffff;
863 		tgt &= 0xffff;
864 		sdp += bus;
865 		flags = sdp->isp_devparam[tgt].actv_flags;
866 
867 		xm.xm_mode = 0;
868 		xm.xm_period = sdp->isp_devparam[tgt].actv_period;
869 		xm.xm_offset = sdp->isp_devparam[tgt].actv_offset;
870 		xm.xm_target = tgt;
871 
872 		if ((flags & DPARM_SYNC) && xm.xm_period && xm.xm_offset)
873 			xm.xm_mode |= PERIPH_CAP_SYNC;
874 		if (flags & DPARM_WIDE)
875 			xm.xm_mode |= PERIPH_CAP_WIDE16;
876 		if (flags & DPARM_TQING)
877 			xm.xm_mode |= PERIPH_CAP_TQING;
878 		scsipi_async_event(bus? &isp->isp_chanB : &isp->isp_chanA,
879 		    ASYNC_EVENT_XFER_MODE, &xm);
880 		break;
881 	}
882 	case ISPASYNC_BUS_RESET:
883 		bus = *((int *) arg);
884 		scsipi_async_event(bus? &isp->isp_chanB : &isp->isp_chanA,
885 		    ASYNC_EVENT_RESET, NULL);
886 		isp_prt(isp, ISP_LOGINFO, "SCSI bus %d reset detected", bus);
887 		break;
888 	case ISPASYNC_LIP:
889 		/*
890 		 * Don't do queue freezes or blockage until we have the
891 		 * thread running that can unfreeze/unblock us.
892 		 */
893 		if (isp->isp_osinfo.blocked == 0)  {
894 			if (isp->isp_osinfo.thread) {
895 				isp->isp_osinfo.blocked = 1;
896 				scsipi_channel_freeze(&isp->isp_chanA, 1);
897 			}
898 		}
899 		isp_prt(isp, ISP_LOGINFO, "LIP Received");
900 		break;
901 	case ISPASYNC_LOOP_RESET:
902 		/*
903 		 * Don't do queue freezes or blockage until we have the
904 		 * thread running that can unfreeze/unblock us.
905 		 */
906 		if (isp->isp_osinfo.blocked == 0) {
907 			if (isp->isp_osinfo.thread) {
908 				isp->isp_osinfo.blocked = 1;
909 				scsipi_channel_freeze(&isp->isp_chanA, 1);
910 			}
911 		}
912 		isp_prt(isp, ISP_LOGINFO, "Loop Reset Received");
913 		break;
914 	case ISPASYNC_LOOP_DOWN:
915 		/*
916 		 * Don't do queue freezes or blockage until we have the
917 		 * thread running that can unfreeze/unblock us.
918 		 */
919 		if (isp->isp_osinfo.blocked == 0) {
920 			if (isp->isp_osinfo.thread) {
921 				isp->isp_osinfo.blocked = 1;
922 				scsipi_channel_freeze(&isp->isp_chanA, 1);
923 			}
924 		}
925 		isp_prt(isp, ISP_LOGINFO, "Loop DOWN");
926 		break;
927 	case ISPASYNC_LOOP_UP:
928 		/*
929 		 * Let the subsequent ISPASYNC_CHANGE_NOTIFY invoke
930 		 * the FC worker thread. When the FC worker thread
931 		 * is done, let *it* call scsipi_channel_thaw...
932 		 */
933 		isp_prt(isp, ISP_LOGINFO, "Loop UP");
934 		break;
935 	case ISPASYNC_PROMENADE:
936 	if (IS_FC(isp) && isp->isp_dblev) {
937 		static const char fmt[] = "Target %d (Loop 0x%x) Port ID 0x%x "
938 		    "(role %s) %s\n Port WWN 0x%08x%08x\n Node WWN 0x%08x%08x";
939 		const static char *const roles[4] = {
940 		    "None", "Target", "Initiator", "Target/Initiator"
941 		};
942 		fcparam *fcp = isp->isp_param;
943 		int tgt = *((int *) arg);
944 		struct lportdb *lp = &fcp->portdb[tgt];
945 
946 		isp_prt(isp, ISP_LOGINFO, fmt, tgt, lp->loopid, lp->portid,
947 		    roles[lp->roles & 0x3],
948 		    (lp->valid)? "Arrived" : "Departed",
949 		    (u_int32_t) (lp->port_wwn >> 32),
950 		    (u_int32_t) (lp->port_wwn & 0xffffffffLL),
951 		    (u_int32_t) (lp->node_wwn >> 32),
952 		    (u_int32_t) (lp->node_wwn & 0xffffffffLL));
953 		break;
954 	}
955 	case ISPASYNC_CHANGE_NOTIFY:
956 		if (arg == ISPASYNC_CHANGE_PDB) {
957 			isp_prt(isp, ISP_LOGINFO, "Port Database Changed");
958 		} else if (arg == ISPASYNC_CHANGE_SNS) {
959 			isp_prt(isp, ISP_LOGINFO,
960 			    "Name Server Database Changed");
961 		}
962 
963 		/*
964 		 * We can set blocked here because we know it's now okay
965 		 * to try and run isp_fc_runstate (in order to build loop
966 		 * state). But we don't try and freeze the midlayer's queue
967 		 * if we have no thread that we can wake to later unfreeze
968 		 * it.
969 		 */
970 		if (isp->isp_osinfo.blocked == 0) {
971 			isp->isp_osinfo.blocked = 1;
972 			if (isp->isp_osinfo.thread) {
973 				scsipi_channel_freeze(&isp->isp_chanA, 1);
974 			}
975 		}
976 		/*
977 		 * Note that we have work for the thread to do, and
978 		 * if the thread is here already, wake it up.
979 		 */
980 		isp->isp_osinfo.threadwork++;
981 		if (isp->isp_osinfo.thread) {
982 			wakeup(&isp->isp_osinfo.thread);
983 		} else {
984 			isp_prt(isp, ISP_LOGDEBUG1, "no FC thread yet");
985 		}
986 		break;
987 	case ISPASYNC_FABRIC_DEV:
988 	{
989 		int target, base, lim;
990 		fcparam *fcp = isp->isp_param;
991 		struct lportdb *lp = NULL;
992 		struct lportdb *clp = (struct lportdb *) arg;
993 		char *pt;
994 
995 		switch (clp->port_type) {
996 		case 1:
997 			pt = "   N_Port";
998 			break;
999 		case 2:
1000 			pt = "  NL_Port";
1001 			break;
1002 		case 3:
1003 			pt = "F/NL_Port";
1004 			break;
1005 		case 0x7f:
1006 			pt = "  Nx_Port";
1007 			break;
1008 		case 0x81:
1009 			pt = "  F_port";
1010 			break;
1011 		case 0x82:
1012 			pt = "  FL_Port";
1013 			break;
1014 		case 0x84:
1015 			pt = "   E_port";
1016 			break;
1017 		default:
1018 			pt = " ";
1019 			break;
1020 		}
1021 
1022 		isp_prt(isp, ISP_LOGINFO,
1023 		    "%s Fabric Device @ PortID 0x%x", pt, clp->portid);
1024 
1025 		/*
1026 		 * If we don't have an initiator role we bail.
1027 		 *
1028 		 * We just use ISPASYNC_FABRIC_DEV for announcement purposes.
1029 		 */
1030 
1031 		if ((isp->isp_role & ISP_ROLE_INITIATOR) == 0) {
1032 			break;
1033 		}
1034 
1035 		/*
1036 		 * Is this entry for us? If so, we bail.
1037 		 */
1038 
1039 		if (fcp->isp_portid == clp->portid) {
1040 			break;
1041 		}
1042 
1043 		/*
1044 		 * Else, the default policy is to find room for it in
1045 		 * our local port database. Later, when we execute
1046 		 * the call to isp_pdb_sync either this newly arrived
1047 		 * or already logged in device will be (re)announced.
1048 		 */
1049 
1050 		if (fcp->isp_topo == TOPO_FL_PORT)
1051 			base = FC_SNS_ID+1;
1052 		else
1053 			base = 0;
1054 
1055 		if (fcp->isp_topo == TOPO_N_PORT)
1056 			lim = 1;
1057 		else
1058 			lim = MAX_FC_TARG;
1059 
1060 		/*
1061 		 * Is it already in our list?
1062 		 */
1063 		for (target = base; target < lim; target++) {
1064 			if (target >= FL_PORT_ID && target <= FC_SNS_ID) {
1065 				continue;
1066 			}
1067 			lp = &fcp->portdb[target];
1068 			if (lp->port_wwn == clp->port_wwn &&
1069 			    lp->node_wwn == clp->node_wwn) {
1070 				lp->fabric_dev = 1;
1071 				break;
1072 			}
1073 		}
1074 		if (target < lim) {
1075 			break;
1076 		}
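		/*
		 * Not in the port database yet: claim the first unused slot
		 * (port_wwn == 0) for this newly arrived fabric device.
		 */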
1077 		for (target = base; target < lim; target++) {
1078 			if (target >= FL_PORT_ID && target <= FC_SNS_ID) {
1079 				continue;
1080 			}
1081 			lp = &fcp->portdb[target];
1082 			if (lp->port_wwn == 0) {
1083 				break;
1084 			}
1085 		}
1086 		if (target == lim) {
1087 			isp_prt(isp, ISP_LOGWARN,
1088 			    "out of space for fabric devices");
1089 			break;
1090 		}
1091 		lp->port_type = clp->port_type;
1092 		lp->fc4_type = clp->fc4_type;
1093 		lp->node_wwn = clp->node_wwn;
1094 		lp->port_wwn = clp->port_wwn;
1095 		lp->portid = clp->portid;
1096 		lp->fabric_dev = 1;
1097 		break;
1098 	}
1099 	case ISPASYNC_FW_CRASH:
1100 	{
1101 		u_int16_t mbox1, mbox6;
1102 		mbox1 = ISP_READ(isp, OUTMAILBOX1);
1103 		if (IS_DUALBUS(isp)) {
1104 			mbox6 = ISP_READ(isp, OUTMAILBOX6);
1105 		} else {
1106 			mbox6 = 0;
1107 		}
1108 		isp_prt(isp, ISP_LOGERR,
1109 		    "Internal Firmware Error on bus %d @ RISC Address 0x%x",
1110 		    mbox6, mbox1);
1111 #ifdef	ISP_FW_CRASH_DUMP
1112 		if (IS_FC(isp)) {
1113 			if (isp->isp_osinfo.blocked == 0) {
1114 				isp->isp_osinfo.blocked = 1;
1115 				scsipi_channel_freeze(&isp->isp_chanA, 1);
1116 			}
1117 			isp_fw_dump(isp);
1118 		}
1119 		isp_reinit(isp);
1120 		isp_async(isp, ISPASYNC_FW_RESTART, NULL);
1121 #endif
1122 		break;
1123 	}
1124 	default:
1125 		break;
1126 	}
1127 	return (0);
1128 }
1129 
1130 #include <machine/stdarg.h>
1131 void
1132 isp_prt(struct ispsoftc *isp, int level, const char *fmt, ...)
1133 {
1134 	va_list ap;
1135 	if (level != ISP_LOGALL && (level & isp->isp_dblev) == 0) {
1136 		return;
1137 	}
1138 	printf("%s: ", isp->isp_name);
1139 	va_start(ap, fmt);
1140 	vprintf(fmt, ap);
1141 	va_end(ap);
1142 	printf("\n");
1143 }
1144