1 /* $NetBSD: isp_netbsd.c,v 1.59 2003/03/21 18:05:16 mjacob Exp $ */
2 /*
3  * This driver, which is contained in NetBSD in the files:
4  *
5  *	sys/dev/ic/isp.c
6  *	sys/dev/ic/isp_inline.h
7  *	sys/dev/ic/isp_netbsd.c
8  *	sys/dev/ic/isp_netbsd.h
9  *	sys/dev/ic/isp_target.c
10  *	sys/dev/ic/isp_target.h
11  *	sys/dev/ic/isp_tpublic.h
12  *	sys/dev/ic/ispmbox.h
13  *	sys/dev/ic/ispreg.h
14  *	sys/dev/ic/ispvar.h
15  *	sys/microcode/isp/asm_sbus.h
16  *	sys/microcode/isp/asm_1040.h
17  *	sys/microcode/isp/asm_1080.h
18  *	sys/microcode/isp/asm_12160.h
19  *	sys/microcode/isp/asm_2100.h
20  *	sys/microcode/isp/asm_2200.h
21  *	sys/pci/isp_pci.c
22  *	sys/sbus/isp_sbus.c
23  *
24  * is being actively maintained by Matthew Jacob (mjacob@netbsd.org).
25  * This driver is also shared source with the FreeBSD, OpenBSD, Linux, and
26  * Solaris versions. This tends to be an interesting maintenance problem.
27  *
28  * Please coordinate with Matthew Jacob on changes you wish to make here.
29  */
30 /*
31  * Platform (NetBSD) dependent common attachment code for Qlogic adapters.
32  * Matthew Jacob <mjacob@nas.nasa.gov>
33  */
34 /*
35  * Copyright (C) 1997, 1998, 1999 National Aeronautics & Space Administration
36  * All rights reserved.
37  *
38  * Redistribution and use in source and binary forms, with or without
39  * modification, are permitted provided that the following conditions
40  * are met:
41  * 1. Redistributions of source code must retain the above copyright
42  *    notice, this list of conditions and the following disclaimer.
43  * 2. Redistributions in binary form must reproduce the above copyright
44  *    notice, this list of conditions and the following disclaimer in the
45  *    documentation and/or other materials provided with the distribution.
46  * 3. The name of the author may not be used to endorse or promote products
47  *    derived from this software without specific prior written permission
48  *
49  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
50  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
51  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
52  * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
53  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
54  * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
55  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
56  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
57  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
58  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
59  */
60 
61 #include <sys/cdefs.h>
62 __KERNEL_RCSID(0, "$NetBSD: isp_netbsd.c,v 1.59 2003/03/21 18:05:16 mjacob Exp $");
63 
64 #include <dev/ic/isp_netbsd.h>
65 #include <sys/scsiio.h>
66 
67 
68 /*
69  * Set a timeout for the watchdogging of a command.
70  *
71  * The dimensional analysis is
72  *
73  *	milliseconds * (seconds/millisecond) * (ticks/second) = ticks
74  *
75  *			=
76  *
77  *	(milliseconds / 1000) * hz = ticks
78  *
79  *
80  * For timeouts less than 1 second, we'll get zero. Because of this, and
81  * because we want to establish *our* timeout to be longer than what the
82  * firmware might do, we just add 3 seconds at the back end.
83  */
84 #define	_XT(xs)	((((xs)->timeout/1000) * hz) + (3 * hz))
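/*
 * Worked example (illustrative only; assumes hz == 100, the usual default):
 *
 *	xs->timeout == 30000 ms -> (30000 / 1000) * 100 + (3 * 100) == 3300 ticks
 *	xs->timeout ==   500 ms -> (  500 / 1000) * 100 + (3 * 100) ==  300 ticks
 *
 * so sub-second timeouts contribute nothing and only the 3 second pad is left.
 */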
85 
86 static void isp_config_interrupts(struct device *);
87 static void ispminphys_1020(struct buf *);
88 static void ispminphys(struct buf *);
89 static INLINE void ispcmd(struct ispsoftc *, XS_T *);
90 static void isprequest(struct scsipi_channel *, scsipi_adapter_req_t, void *);
91 static int
92 ispioctl(struct scsipi_channel *, u_long, caddr_t, int, struct proc *);
93 
94 static void isp_polled_cmd(struct ispsoftc *, XS_T *);
95 static void isp_dog(void *);
96 static void isp_create_fc_worker(void *);
97 static void isp_fc_worker(void *);
98 
99 /*
100  * Complete attachment of hardware, including subdevices.
101  */
102 void
103 isp_attach(struct ispsoftc *isp)
104 {
105 	isp->isp_state = ISP_RUNSTATE;
106 
107 	isp->isp_osinfo._adapter.adapt_dev = &isp->isp_osinfo._dev;
108 	isp->isp_osinfo._adapter.adapt_nchannels = IS_DUALBUS(isp) ? 2 : 1;
109 	isp->isp_osinfo._adapter.adapt_openings = isp->isp_maxcmds;
110 	/*
111 	 * It's not stated whether max_periph is limited by SPI
112 	 * tag usage, but let's assume that it is.
113 	 */
114 	isp->isp_osinfo._adapter.adapt_max_periph = min(isp->isp_maxcmds, 255);
115 	isp->isp_osinfo._adapter.adapt_ioctl = ispioctl;
116 	isp->isp_osinfo._adapter.adapt_request = isprequest;
117 	if (isp->isp_type <= ISP_HA_SCSI_1020A) {
118 		isp->isp_osinfo._adapter.adapt_minphys = ispminphys_1020;
119 	} else {
120 		isp->isp_osinfo._adapter.adapt_minphys = ispminphys;
121 	}
122 
123 	isp->isp_osinfo._chan.chan_adapter = &isp->isp_osinfo._adapter;
124 	isp->isp_osinfo._chan.chan_bustype = &scsi_bustype;
125 	isp->isp_osinfo._chan.chan_channel = 0;
126 
127 	/*
128 	 * Until the midlayer is fixed to use REPORT LUNS, limit to 8 luns.
129 	 */
130 	isp->isp_osinfo._chan.chan_nluns = min(isp->isp_maxluns, 8);
131 
132 	if (IS_FC(isp)) {
133 		isp->isp_osinfo._chan.chan_flags = SCSIPI_CHAN_NOSETTLE;
134 		isp->isp_osinfo._chan.chan_ntargets = MAX_FC_TARG;
135 		isp->isp_osinfo._chan.chan_id = MAX_FC_TARG;
136 		isp->isp_osinfo.threadwork = 1;
137 		/*
138 		 * Note that isp_create_fc_worker won't get called
139 		 * until much much later (after proc0 is created).
140 		 */
141 		kthread_create(isp_create_fc_worker, isp);
142 #ifdef	ISP_FW_CRASH_DUMP
143 		if (IS_2200(isp)) {
144 			FCPARAM(isp)->isp_dump_data =
145 			    malloc(QLA2200_RISC_IMAGE_DUMP_SIZE, M_DEVBUF,
146 				M_NOWAIT);
147 		} else if (IS_23XX(isp)) {
148 			FCPARAM(isp)->isp_dump_data =
149 			    malloc(QLA2300_RISC_IMAGE_DUMP_SIZE, M_DEVBUF,
150 				M_NOWAIT);
151 		}
152 		if (FCPARAM(isp)->isp_dump_data)
153 			FCPARAM(isp)->isp_dump_data[0] = 0;
154 #endif
155 	} else {
156 		int bus = 0;
157 		sdparam *sdp = isp->isp_param;
158 
159 		isp->isp_osinfo._chan.chan_ntargets = MAX_TARGETS;
160 		isp->isp_osinfo._chan.chan_id = sdp->isp_initiator_id;
161 		isp->isp_osinfo.discovered[0] = 1 << sdp->isp_initiator_id;
162 		if (IS_DUALBUS(isp)) {
163 			isp->isp_osinfo._chan_b = isp->isp_osinfo._chan;
164 			sdp++;
165 			isp->isp_osinfo.discovered[1] =
166 			    1 << sdp->isp_initiator_id;
167 			isp->isp_osinfo._chan_b.chan_id = sdp->isp_initiator_id;
168 			isp->isp_osinfo._chan_b.chan_channel = 1;
169 		}
170 		ISP_LOCK(isp);
171 		(void) isp_control(isp, ISPCTL_RESET_BUS, &bus);
172 		if (IS_DUALBUS(isp)) {
173 			bus++;
174 			(void) isp_control(isp, ISPCTL_RESET_BUS, &bus);
175 		}
176 		ISP_UNLOCK(isp);
177 	}
178 
179 
180 	/*
181 	 * Defer enabling mailbox interrupts until later.
182 	 */
183 	config_interrupts((struct device *) isp, isp_config_interrupts);
184 
185 	/*
186 	 * And attach children (if any).
187 	 */
188 	config_found((void *)isp, &isp->isp_chanA, scsiprint);
189 	if (IS_DUALBUS(isp)) {
190 		config_found((void *)isp, &isp->isp_chanB, scsiprint);
191 	}
192 }
193 
194 
195 static void
196 isp_config_interrupts(struct device *self)
197 {
198 	struct ispsoftc *isp = (struct ispsoftc *) self;
199 
200 	/*
201 	 * After this point, we'll be doing the new configuration
202 	 * schema which allows interrupts, so we can do tsleep/wakeup
203 	 * for mailbox stuff at that point, if that's allowed.
204 	 */
205 	if (IS_FC(isp)) {
206 		isp->isp_osinfo.no_mbox_ints = 0;
207 	}
208 }
209 
210 
211 /*
212  * minphys our xfers
213  */
214 
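/*
 * Two variants: the 1020/1020A class chips apparently support only a 24 bit
 * transfer count, so their transfers are clamped to 16MB, while everything
 * else is clamped to a nominal 1GB before deferring to the generic minphys().
 */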
215 static void
216 ispminphys_1020(struct buf *bp)
217 {
218 	if (bp->b_bcount >= (1 << 24)) {
219 		bp->b_bcount = (1 << 24);
220 	}
221 	minphys(bp);
222 }
223 
224 static void
225 ispminphys(struct buf *bp)
226 {
227 	if (bp->b_bcount >= (1 << 30)) {
228 		bp->b_bcount = (1 << 30);
229 	}
230 	minphys(bp);
231 }
232 
233 static int
234 ispioctl(struct scsipi_channel *chan, u_long cmd, caddr_t addr, int flag,
235 	struct proc *p)
236 {
237 	struct ispsoftc *isp = (void *)chan->chan_adapter->adapt_dev;
238 	int retval = ENOTTY;
239 
240 	switch (cmd) {
241 #ifdef	ISP_FW_CRASH_DUMP
242 	case ISP_GET_FW_CRASH_DUMP:
243 	{
244 		u_int16_t *ptr = FCPARAM(isp)->isp_dump_data;
245 		size_t sz;
246 
247 		retval = 0;
248 		if (IS_2200(isp))
249 			sz = QLA2200_RISC_IMAGE_DUMP_SIZE;
250 		else
251 			sz = QLA2300_RISC_IMAGE_DUMP_SIZE;
252 		ISP_LOCK(isp);
253 		if (ptr && *ptr) {
254 			void *uaddr = *((void **) addr);
255 			if (copyout(ptr, uaddr, sz)) {
256 				retval = EFAULT;
257 			} else {
258 				*ptr = 0;
259 			}
260 		} else {
261 			retval = ENXIO;
262 		}
263 		ISP_UNLOCK(isp);
264 		break;
265 	}
266 
267 	case ISP_FORCE_CRASH_DUMP:
268 		ISP_LOCK(isp);
269 		if (isp->isp_osinfo.blocked == 0) {
270 			isp->isp_osinfo.blocked = 1;
271 			scsipi_channel_freeze(&isp->isp_chanA, 1);
272 		}
273 		isp_fw_dump(isp);
274 		isp_reinit(isp);
275 		ISP_UNLOCK(isp);
276 		retval = 0;
277 		break;
278 #endif
279 	case ISP_SDBLEV:
280 	{
281 		int olddblev = isp->isp_dblev;
282 		isp->isp_dblev = *(int *)addr;
283 		*(int *)addr = olddblev;
284 		retval = 0;
285 		break;
286 	}
287 	case ISP_RESETHBA:
288 		ISP_LOCK(isp);
289 		isp_reinit(isp);
290 		ISP_UNLOCK(isp);
291 		retval = 0;
292 		break;
293 	case ISP_RESCAN:
294 		if (IS_FC(isp)) {
295 			ISP_LOCK(isp);
296 			if (isp_fc_runstate(isp, 5 * 1000000)) {
297 				retval = EIO;
298 			} else {
299 				retval = 0;
300 			}
301 			ISP_UNLOCK(isp);
302 		}
303 		break;
304 	case ISP_FC_LIP:
305 		if (IS_FC(isp)) {
306 			ISP_LOCK(isp);
307 			if (isp_control(isp, ISPCTL_SEND_LIP, 0)) {
308 				retval = EIO;
309 			} else {
310 				retval = 0;
311 			}
312 			ISP_UNLOCK(isp);
313 		}
314 		break;
315 	case ISP_FC_GETDINFO:
316 	{
317 		struct isp_fc_device *ifc = (struct isp_fc_device *) addr;
318 		struct lportdb *lp;
319 
320 		if (ifc->loopid < 0 || ifc->loopid >= MAX_FC_TARG) {
321 			retval = EINVAL;
322 			break;
323 		}
324 		ISP_LOCK(isp);
325 		lp = &FCPARAM(isp)->portdb[ifc->loopid];
326 		if (lp->valid) {
327 			ifc->loopid = lp->loopid;
328 			ifc->portid = lp->portid;
329 			ifc->node_wwn = lp->node_wwn;
330 			ifc->port_wwn = lp->port_wwn;
331 			retval = 0;
332 		} else {
333 			retval = ENODEV;
334 		}
335 		ISP_UNLOCK(isp);
336 		break;
337 	}
338 	case ISP_GET_STATS:
339 	{
340 		isp_stats_t *sp = (isp_stats_t *) addr;
341 
342 		MEMZERO(sp, sizeof (*sp));
343 		sp->isp_stat_version = ISP_STATS_VERSION;
344 		sp->isp_type = isp->isp_type;
345 		sp->isp_revision = isp->isp_revision;
346 		ISP_LOCK(isp);
347 		sp->isp_stats[ISP_INTCNT] = isp->isp_intcnt;
348 		sp->isp_stats[ISP_INTBOGUS] = isp->isp_intbogus;
349 		sp->isp_stats[ISP_INTMBOXC] = isp->isp_intmboxc;
350 		sp->isp_stats[ISP_INGOASYNC] = isp->isp_intoasync;
351 		sp->isp_stats[ISP_RSLTCCMPLT] = isp->isp_rsltccmplt;
352 		sp->isp_stats[ISP_FPHCCMCPLT] = isp->isp_fphccmplt;
353 		sp->isp_stats[ISP_RSCCHIWAT] = isp->isp_rscchiwater;
354 		sp->isp_stats[ISP_FPCCHIWAT] = isp->isp_fpcchiwater;
355 		ISP_UNLOCK(isp);
356 		retval = 0;
357 		break;
358 	}
359 	case ISP_CLR_STATS:
360 		ISP_LOCK(isp);
361 		isp->isp_intcnt = 0;
362 		isp->isp_intbogus = 0;
363 		isp->isp_intmboxc = 0;
364 		isp->isp_intoasync = 0;
365 		isp->isp_rsltccmplt = 0;
366 		isp->isp_fphccmplt = 0;
367 		isp->isp_rscchiwater = 0;
368 		isp->isp_fpcchiwater = 0;
369 		ISP_UNLOCK(isp);
370 		retval = 0;
371 		break;
372 	case ISP_FC_GETHINFO:
373 	{
374 		struct isp_hba_device *hba = (struct isp_hba_device *) addr;
375 		MEMZERO(hba, sizeof (*hba));
376 		ISP_LOCK(isp);
377 		hba->fc_speed = FCPARAM(isp)->isp_gbspeed;
378 		hba->fc_scsi_supported = 1;
379 		hba->fc_topology = FCPARAM(isp)->isp_topo + 1;
380 		hba->fc_loopid = FCPARAM(isp)->isp_loopid;
381 		hba->active_node_wwn = FCPARAM(isp)->isp_nodewwn;
382 		hba->active_port_wwn = FCPARAM(isp)->isp_portwwn;
383 		ISP_UNLOCK(isp);
		retval = 0;
384 		break;
385 	}
386 	case SCBUSIORESET:
387 		ISP_LOCK(isp);
388 		if (isp_control(isp, ISPCTL_RESET_BUS, &chan->chan_channel))
389 			retval = EIO;
390 		else
391 			retval = 0;
392 		ISP_UNLOCK(isp);
393 		break;
394 	default:
395 		break;
396 	}
397 	return (retval);
398 }
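
/*
 * Reaching these ioctls from userland goes through the scsipi adapter ioctl
 * hook. A minimal sketch, not compiled here; the device node and include
 * path are assumptions, only the ioctl command itself comes from this file:
 *
 *	#include <sys/ioctl.h>
 *	#include <fcntl.h>
 *	#include <dev/ic/isp_ioctl.h>
 *
 *	int fd = open("/dev/scsibus0", O_RDWR);
 *	int level = 0x1;
 *	if (fd >= 0 && ioctl(fd, ISP_SDBLEV, &level) == 0)
 *		printf("previous debug level 0x%x\n", level);
 */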
399 
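/*
 * Run one scsipi transfer: bring the chip to RUNSTATE if it isn't there yet,
 * check FC loop state inline when the worker thread hasn't started, push
 * polled requests through isp_polled_cmd, and otherwise hand the command to
 * isp_start and arm the isp_dog watchdog.
 */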
400 static INLINE void
401 ispcmd(struct ispsoftc *isp, XS_T *xs)
402 {
403 	ISP_LOCK(isp);
404 	if (isp->isp_state < ISP_RUNSTATE) {
405 		DISABLE_INTS(isp);
406 		isp_init(isp);
407 		if (isp->isp_state != ISP_INITSTATE) {
408 			ENABLE_INTS(isp);
409 			ISP_UNLOCK(isp);
410 			isp_prt(isp, ISP_LOGERR, "isp not at init state");
411 			XS_SETERR(xs, HBA_BOTCH);
412 			scsipi_done(xs);
413 			return;
414 		}
415 		isp->isp_state = ISP_RUNSTATE;
416 		ENABLE_INTS(isp);
417 	}
418 	/*
419 	 * Handle the case of a FC card where the FC thread hasn't
420 	 * fired up yet and we have loop state to clean up. If we
421 	 * can't clear things up and we've never seen loop up, bounce
422 	 * the command.
423 	 */
424 	if (IS_FC(isp) && isp->isp_osinfo.threadwork &&
425 	    isp->isp_osinfo.thread == 0) {
426 		volatile u_int8_t ombi = isp->isp_osinfo.no_mbox_ints;
427 		int delay_time;
428 
429 		if (xs->xs_control & XS_CTL_POLL) {
430 			isp->isp_osinfo.no_mbox_ints = 1;
431 		}
432 
433 		if (isp->isp_osinfo.loop_checked == 0) {
434 			delay_time = 10 * 1000000;
435 			isp->isp_osinfo.loop_checked = 1;
436 		} else {
437 			delay_time = 250000;
438 		}
439 
440 		if (isp_fc_runstate(isp, delay_time) != 0) {
441 			if (xs->xs_control & XS_CTL_POLL) {
442 				isp->isp_osinfo.no_mbox_ints = ombi;
443 			}
444 			if (FCPARAM(isp)->loop_seen_once == 0) {
445 				XS_SETERR(xs, HBA_SELTIMEOUT);
446 				scsipi_done(xs);
447 				ISP_UNLOCK(isp);
448 				return;
449 			}
450 			/*
451 			 * Otherwise, fall thru to be queued up for later.
452 			 */
453 		} else {
454 			int wasblocked =
455 			    (isp->isp_osinfo.blocked || isp->isp_osinfo.paused);
456 			isp->isp_osinfo.threadwork = 0;
457 			isp->isp_osinfo.blocked =
458 			    isp->isp_osinfo.paused = 0;
459 			if (wasblocked) {
460 				scsipi_channel_thaw(&isp->isp_chanA, 1);
461 			}
462 		}
463 		if (xs->xs_control & XS_CTL_POLL) {
464 			isp->isp_osinfo.no_mbox_ints = ombi;
465 		}
466 	}
467 
468 	if (isp->isp_osinfo.paused) {
469 		isp_prt(isp, ISP_LOGWARN, "I/O while paused");
470 		xs->error = XS_RESOURCE_SHORTAGE;
471 		scsipi_done(xs);
472 		ISP_UNLOCK(isp);
473 		return;
474 	}
475 	if (isp->isp_osinfo.blocked) {
476 		isp_prt(isp, ISP_LOGWARN, "I/O while blocked");
477 		xs->error = XS_REQUEUE;
478 		scsipi_done(xs);
479 		ISP_UNLOCK(isp);
480 		return;
481 	}
482 
483 	if (xs->xs_control & XS_CTL_POLL) {
484 		volatile u_int8_t ombi = isp->isp_osinfo.no_mbox_ints;
485 		isp->isp_osinfo.no_mbox_ints = 1;
486 		isp_polled_cmd(isp, xs);
487 		isp->isp_osinfo.no_mbox_ints = ombi;
488 		ISP_UNLOCK(isp);
489 		return;
490 	}
491 
492 	switch (isp_start(xs)) {
493 	case CMD_QUEUED:
494 		if (xs->timeout) {
495 			callout_reset(&xs->xs_callout, _XT(xs), isp_dog, xs);
496 		}
497 		break;
498 	case CMD_EAGAIN:
499 		isp->isp_osinfo.paused = 1;
500 		xs->error = XS_RESOURCE_SHORTAGE;
501 		scsipi_channel_freeze(&isp->isp_chanA, 1);
502 		if (IS_DUALBUS(isp)) {
503 			scsipi_channel_freeze(&isp->isp_chanB, 1);
504 		}
505 		scsipi_done(xs);
506 		break;
507 	case CMD_RQLATER:
508 		/*
509 		 * We can only get RQLATER from FC devices (1 channel only)
510 		 *
511 		 * Also, if we've never seen loop up, bounce the command
512 		 * (somebody has booted with no FC cable connected)
513 		 */
514 		if (FCPARAM(isp)->loop_seen_once == 0) {
515 			XS_SETERR(xs, HBA_SELTIMEOUT);
516 			scsipi_done(xs);
517 			break;
518 		}
519 		if (isp->isp_osinfo.blocked == 0) {
520 			isp->isp_osinfo.blocked = 1;
521 			scsipi_channel_freeze(&isp->isp_chanA, 1);
522 		}
523 		xs->error = XS_REQUEUE;
524 		scsipi_done(xs);
525 		break;
526 	case CMD_COMPLETE:
527 		scsipi_done(xs);
528 		break;
529 	}
530 	ISP_UNLOCK(isp);
531 }
532 
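/*
 * scsipi adapter request entry point: transfers are dispatched to ispcmd;
 * SET_XFER_MODE requests merge the desired sync/wide/tagged capabilities
 * into the target's goal flags and mark the bus for renegotiation.
 */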
533 static void
534 isprequest(struct scsipi_channel *chan, scsipi_adapter_req_t req, void *arg)
535 {
536 	struct ispsoftc *isp = (void *)chan->chan_adapter->adapt_dev;
537 
538 	switch (req) {
539 	case ADAPTER_REQ_RUN_XFER:
540 		ispcmd(isp, (XS_T *) arg);
541 		break;
542 
543 	case ADAPTER_REQ_GROW_RESOURCES:
544 		/* Not supported. */
545 		break;
546 
547 	case ADAPTER_REQ_SET_XFER_MODE:
548 	if (IS_SCSI(isp)) {
549 		struct scsipi_xfer_mode *xm = arg;
550 		int dflags = 0;
551 		sdparam *sdp = SDPARAM(isp);
552 
553 		sdp += chan->chan_channel;
554 		if (xm->xm_mode & PERIPH_CAP_TQING)
555 			dflags |= DPARM_TQING;
556 		if (xm->xm_mode & PERIPH_CAP_WIDE16)
557 			dflags |= DPARM_WIDE;
558 		if (xm->xm_mode & PERIPH_CAP_SYNC)
559 			dflags |= DPARM_SYNC;
560 		ISP_LOCK(isp);
561 		sdp->isp_devparam[xm->xm_target].goal_flags |= dflags;
562 		dflags = sdp->isp_devparam[xm->xm_target].goal_flags;
563 		sdp->isp_devparam[xm->xm_target].dev_update = 1;
564 		isp->isp_update |= (1 << chan->chan_channel);
565 		ISP_UNLOCK(isp);
566 		isp_prt(isp, ISP_LOGDEBUG1,
567 		    "isprequest: device flags 0x%x for %d.%d.X",
568 		    dflags, chan->chan_channel, xm->xm_target);
569 		break;
570 	}
571 	default:
572 		break;
573 	}
574 }
575 
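/*
 * Start a command and then spin on the interrupt status registers until it
 * completes or its timeout (if any) expires; a command that never completes
 * is aborted and, failing that, the chip is reinitialized.
 */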
576 static void
577 isp_polled_cmd(struct ispsoftc *isp, XS_T *xs)
578 {
579 	int result;
580 	int infinite = 0, mswait;
581 
582 	result = isp_start(xs);
583 
584 	switch (result) {
585 	case CMD_QUEUED:
586 		break;
587 	case CMD_RQLATER:
588 		if (XS_NOERR(xs)) {
589 			xs->error = XS_REQUEUE;
590 		}
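		/* FALLTHROUGH */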
591 	case CMD_EAGAIN:
592 		if (XS_NOERR(xs)) {
593 			xs->error = XS_RESOURCE_SHORTAGE;
594 		}
595 		/* FALLTHROUGH */
596 	case CMD_COMPLETE:
597 		scsipi_done(xs);
598 		return;
599 
600 	}
601 
602 	/*
603 	 * If we can't use interrupts, poll on completion.
604 	 */
605 	if ((mswait = XS_TIME(xs)) == 0)
606 		infinite = 1;
607 
608 	while (mswait || infinite) {
609 		u_int16_t isr, sema, mbox;
610 		if (ISP_READ_ISR(isp, &isr, &sema, &mbox)) {
611 			isp_intr(isp, isr, sema, mbox);
612 			if (XS_CMD_DONE_P(xs)) {
613 				break;
614 			}
615 		}
616 		USEC_DELAY(1000);
617 		mswait -= 1;
618 	}
619 
620 	/*
621 	 * If no other error occurred but we didn't finish,
622 	 * something bad happened.
623 	 */
624 	if (XS_CMD_DONE_P(xs) == 0) {
625 		if (isp_control(isp, ISPCTL_ABORT_CMD, xs)) {
626 			isp_reinit(isp);
627 		}
628 		if (XS_NOERR(xs)) {
629 			isp_prt(isp, ISP_LOGERR, "polled command timed out");
630 			XS_SETERR(xs, HBA_BOTCH);
631 		}
632 	}
633 	scsipi_done(xs);
634 }
635 
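/*
 * Completion hook called from the core code: cancel the watchdog, map a
 * QUEUE FULL status to XS_BUSY, thaw any channels frozen for a resource
 * shortage, and hand the transfer back to the midlayer.
 */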
636 void
637 isp_done(XS_T *xs)
638 {
639 	XS_CMD_S_DONE(xs);
640 	if (XS_CMD_WDOG_P(xs) == 0) {
641 		struct ispsoftc *isp = XS_ISP(xs);
642 		callout_stop(&xs->xs_callout);
643 		if (XS_CMD_GRACE_P(xs)) {
644 			isp_prt(isp, ISP_LOGDEBUG1,
645 			    "finished command on borrowed time");
646 		}
647 		XS_CMD_S_CLEAR(xs);
648 		/*
649 		 * Fixup- if we get a QFULL, we need
650 		 * to set XS_BUSY as the error.
651 		 */
652 		if (xs->status == SCSI_QUEUE_FULL) {
653 			xs->error = XS_BUSY;
654 		}
655 		if (isp->isp_osinfo.paused) {
656 			isp->isp_osinfo.paused = 0;
657 			scsipi_channel_timed_thaw(&isp->isp_chanA);
658 			if (IS_DUALBUS(isp)) {
659 				scsipi_channel_timed_thaw(&isp->isp_chanB);
660 			}
661 		}
662 		if (xs->error == XS_DRIVER_STUFFUP) {
663 			isp_prt(isp, ISP_LOGERR, "BOTCHED cmd for %d.%d.%d cmd 0x%x datalen %ld",
664 			    XS_CHANNEL(xs), XS_TGT(xs), XS_LUN(xs), XS_CDBP(xs)[0], (long) XS_XFRLEN(xs));
665 		}
666 		scsipi_done(xs);
667 	}
668 }
669 
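/*
 * Per-command watchdog. On the first firing the command gets one more second
 * of grace and a SYNC_ALL marker is queued to the chip; if it is still
 * outstanding on the next firing it is aborted, its resources are released
 * and it is completed with XS_TIMEOUT.
 */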
670 static void
671 isp_dog(void *arg)
672 {
673 	XS_T *xs = arg;
674 	struct ispsoftc *isp = XS_ISP(xs);
675 	u_int16_t handle;
676 
677 	ISP_ILOCK(isp);
678 	/*
679 	 * We've decided this command is dead. Make sure we're not trying
680 	 * to kill a command that's already dead by getting its handle and
681 	 * seeing whether it's still alive.
682 	 */
683 	handle = isp_find_handle(isp, xs);
684 	if (handle) {
685 		u_int16_t isr, mbox, sema;
686 
687 		if (XS_CMD_DONE_P(xs)) {
688 			isp_prt(isp, ISP_LOGDEBUG1,
689 			    "watchdog found done cmd (handle 0x%x)", handle);
690 			ISP_IUNLOCK(isp);
691 			return;
692 		}
693 
694 		if (XS_CMD_WDOG_P(xs)) {
695 			isp_prt(isp, ISP_LOGDEBUG1,
696 			    "recursive watchdog (handle 0x%x)", handle);
697 			ISP_IUNLOCK(isp);
698 			return;
699 		}
700 
701 		XS_CMD_S_WDOG(xs);
702 
703 		if (ISP_READ_ISR(isp, &isr, &sema, &mbox)) {
704 			isp_intr(isp, isr, sema, mbox);
705 
706 		}
707 		if (XS_CMD_DONE_P(xs)) {
708 			isp_prt(isp, ISP_LOGDEBUG1,
709 			    "watchdog cleanup for handle 0x%x", handle);
710 			XS_CMD_C_WDOG(xs);
711 			isp_done(xs);
712 		} else if (XS_CMD_GRACE_P(xs)) {
713 			isp_prt(isp, ISP_LOGDEBUG1,
714 			    "watchdog timeout for handle 0x%x", handle);
715 			/*
716 			 * Make sure the command is *really* dead before we
717 			 * release the handle (and DMA resources) for reuse.
718 			 */
719 			(void) isp_control(isp, ISPCTL_ABORT_CMD, arg);
720 
721 			/*
722 			 * After this point, the command is really dead.
723 			 */
724 			if (XS_XFRLEN(xs)) {
725 				ISP_DMAFREE(isp, xs, handle);
726 			}
727 			isp_destroy_handle(isp, handle);
728 			XS_SETERR(xs, XS_TIMEOUT);
729 			XS_CMD_S_CLEAR(xs);
730 			isp_done(xs);
731 		} else {
732 			u_int16_t nxti, optr;
733 			ispreq_t local, *mp = &local, *qe;
734 			isp_prt(isp, ISP_LOGDEBUG2,
735 			    "possible command timeout on handle %x", handle);
736 			XS_CMD_C_WDOG(xs);
737 			callout_reset(&xs->xs_callout, hz, isp_dog, xs);
738 			if (isp_getrqentry(isp, &nxti, &optr, (void *) &qe)) {
739 				ISP_IUNLOCK(isp);
740 				return;
741 			}
742 			XS_CMD_S_GRACE(xs);
743 			MEMZERO((void *) mp, sizeof (*mp));
744 			mp->req_header.rqs_entry_count = 1;
745 			mp->req_header.rqs_entry_type = RQSTYPE_MARKER;
746 			mp->req_modifier = SYNC_ALL;
747 			mp->req_target = XS_CHANNEL(xs) << 7;
748 			isp_put_request(isp, mp, qe);
749 			ISP_ADD_REQUEST(isp, nxti);
750 		}
751 	} else {
752 		isp_prt(isp, ISP_LOGDEBUG0, "watchdog with no command");
753 	}
754 	ISP_IUNLOCK(isp);
755 }
756 
757 /*
758  * Fibre Channel state cleanup thread
759  */
760 static void
761 isp_create_fc_worker(void *arg)
762 {
763 	struct ispsoftc *isp = arg;
764 
765 	if (kthread_create1(isp_fc_worker, isp, &isp->isp_osinfo.thread,
766 	    "%s:fc_thrd", isp->isp_name)) {
767 		isp_prt(isp, ISP_LOGERR, "unable to create FC worker thread");
768 		panic("isp_create_fc_worker");
769 	}
770 
771 }
772 
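/*
 * The worker runs at splbio: while work is posted it retries isp_fc_runstate,
 * thaws the midlayer queue once firmware and loop state are ready, and then
 * sleeps until isp_async posts more work (or until it is asked to exit).
 */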
773 static void
774 isp_fc_worker(void *arg)
775 {
776 	void scsipi_run_queue(struct scsipi_channel *);
777 	struct ispsoftc *isp = arg;
778 
779 	for (;;) {
780 		int s;
781 
782 		/*
783 		 * Note we do *not* use the ISP_LOCK/ISP_UNLOCK macros here.
784 		 */
785 		s = splbio();
786 		while (isp->isp_osinfo.threadwork) {
787 			isp->isp_osinfo.threadwork = 0;
788 			if (isp_fc_runstate(isp, 10 * 1000000) == 0) {
789 				break;
790 			}
791 			if (isp->isp_osinfo.loop_checked &&
792 			     FCPARAM(isp)->loop_seen_once == 0) {
793 				splx(s);
794 				goto skip;
795 			}
796 			isp->isp_osinfo.threadwork = 1;
797 			splx(s);
798 			delay(500 * 1000);
799 			s = splbio();
800 		}
801 		if (FCPARAM(isp)->isp_fwstate != FW_READY ||
802 		    FCPARAM(isp)->isp_loopstate != LOOP_READY) {
803 			isp_prt(isp, ISP_LOGINFO, "isp_fc_runstate in vain");
804 			isp->isp_osinfo.threadwork = 1;
805 			splx(s);
806 			continue;
807 		}
808 
809 		if (isp->isp_osinfo.blocked) {
810 			isp->isp_osinfo.blocked = 0;
811 			isp_prt(isp, ISP_LOGDEBUG0,
812 			    "restarting queues (freeze count %d)",
813 			    isp->isp_chanA.chan_qfreeze);
814 			scsipi_channel_thaw(&isp->isp_chanA, 1);
815 		}
816 
817 		if (isp->isp_osinfo.thread == NULL)
818 			break;
819 
820 skip:
821 		(void) tsleep(&isp->isp_osinfo.thread, PRIBIO, "fcclnup", 0);
822 
823 		splx(s);
824 	}
825 
826 	/* In case parent is waiting for us to exit. */
827 	wakeup(&isp->isp_osinfo.thread);
828 
829 	kthread_exit(0);
830 }
831 
832 /*
833  * Free any associated resources prior to decommissioning and
834  * set the card to a known state (so it doesn't wake up and kick
835  * us when we aren't expecting it to).
836  *
837  * Locks are held before coming here.
838  */
839 void
840 isp_uninit(struct ispsoftc *isp)
841 {
842 	isp_lock(isp);
843 	/*
844 	 * Leave with interrupts disabled.
845 	 */
846 	DISABLE_INTS(isp);
847 	isp_unlock(isp);
848 }
849 
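/*
 * Platform callback for asynchronous events from the core code: negotiated
 * transfer modes, bus resets, FC loop transitions, fabric device discovery
 * and firmware crashes are all reported through here.
 */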
850 int
851 isp_async(struct ispsoftc *isp, ispasync_t cmd, void *arg)
852 {
853 	int bus, tgt;
854 
855 	switch (cmd) {
856 	case ISPASYNC_NEW_TGT_PARAMS:
857 	if (IS_SCSI(isp) && isp->isp_dblev) {
858 		sdparam *sdp = isp->isp_param;
859 		int flags;
860 		struct scsipi_xfer_mode xm;
861 
862 		tgt = *((int *) arg);
863 		bus = (tgt >> 16) & 0xffff;
864 		tgt &= 0xffff;
865 		sdp += bus;
866 		flags = sdp->isp_devparam[tgt].actv_flags;
867 
868 		xm.xm_mode = 0;
869 		xm.xm_period = sdp->isp_devparam[tgt].actv_period;
870 		xm.xm_offset = sdp->isp_devparam[tgt].actv_offset;
871 		xm.xm_target = tgt;
872 
873 		if ((flags & DPARM_SYNC) && xm.xm_period && xm.xm_offset)
874 			xm.xm_mode |= PERIPH_CAP_SYNC;
875 		if (flags & DPARM_WIDE)
876 			xm.xm_mode |= PERIPH_CAP_WIDE16;
877 		if (flags & DPARM_TQING)
878 			xm.xm_mode |= PERIPH_CAP_TQING;
879 		scsipi_async_event(bus? &isp->isp_chanB : &isp->isp_chanA,
880 		    ASYNC_EVENT_XFER_MODE, &xm);
881 		break;
882 	}
883 	case ISPASYNC_BUS_RESET:
884 		bus = *((int *) arg);
885 		scsipi_async_event(bus? &isp->isp_chanB : &isp->isp_chanA,
886 		    ASYNC_EVENT_RESET, NULL);
887 		isp_prt(isp, ISP_LOGINFO, "SCSI bus %d reset detected", bus);
888 		break;
889 	case ISPASYNC_LIP:
890 		/*
891 		 * Don't do queue freezes or blockage until we have the
892 		 * thread running that can unfreeze/unblock us.
893 		 */
894 		if (isp->isp_osinfo.blocked == 0)  {
895 			if (isp->isp_osinfo.thread) {
896 				isp->isp_osinfo.blocked = 1;
897 				scsipi_channel_freeze(&isp->isp_chanA, 1);
898 			}
899 		}
900 		isp_prt(isp, ISP_LOGINFO, "LIP Received");
901 		break;
902 	case ISPASYNC_LOOP_RESET:
903 		/*
904 		 * Don't do queue freezes or blockage until we have the
905 		 * thread running that can unfreeze/unblock us.
906 		 */
907 		if (isp->isp_osinfo.blocked == 0) {
908 			if (isp->isp_osinfo.thread) {
909 				isp->isp_osinfo.blocked = 1;
910 				scsipi_channel_freeze(&isp->isp_chanA, 1);
911 			}
912 		}
913 		isp_prt(isp, ISP_LOGINFO, "Loop Reset Received");
914 		break;
915 	case ISPASYNC_LOOP_DOWN:
916 		/*
917 		 * Don't do queue freezes or blockage until we have the
918 		 * thread running that can unfreeze/unblock us.
919 		 */
920 		if (isp->isp_osinfo.blocked == 0) {
921 			if (isp->isp_osinfo.thread) {
922 				isp->isp_osinfo.blocked = 1;
923 				scsipi_channel_freeze(&isp->isp_chanA, 1);
924 			}
925 		}
926 		isp_prt(isp, ISP_LOGINFO, "Loop DOWN");
927 		break;
928 	case ISPASYNC_LOOP_UP:
929 		/*
930 		 * Let the subsequent ISPASYNC_CHANGE_NOTIFY invoke
931 		 * the FC worker thread. When the FC worker thread
932 		 * is done, let *it* call scsipi_channel_thaw...
933 		 */
934 		isp_prt(isp, ISP_LOGINFO, "Loop UP");
935 		break;
936 	case ISPASYNC_PROMENADE:
937 	if (IS_FC(isp) && isp->isp_dblev) {
938 		static const char fmt[] = "Target %d (Loop 0x%x) Port ID 0x%x "
939 		    "(role %s) %s\n Port WWN 0x%08x%08x\n Node WWN 0x%08x%08x";
940 		const static char *const roles[4] = {
941 		    "None", "Target", "Initiator", "Target/Initiator"
942 		};
943 		fcparam *fcp = isp->isp_param;
944 		int tgt = *((int *) arg);
945 		struct lportdb *lp = &fcp->portdb[tgt];
946 
947 		isp_prt(isp, ISP_LOGINFO, fmt, tgt, lp->loopid, lp->portid,
948 		    roles[lp->roles & 0x3],
949 		    (lp->valid)? "Arrived" : "Departed",
950 		    (u_int32_t) (lp->port_wwn >> 32),
951 		    (u_int32_t) (lp->port_wwn & 0xffffffffLL),
952 		    (u_int32_t) (lp->node_wwn >> 32),
953 		    (u_int32_t) (lp->node_wwn & 0xffffffffLL));
954 		break;
955 	}
956 	case ISPASYNC_CHANGE_NOTIFY:
957 		if (arg == ISPASYNC_CHANGE_PDB) {
958 			isp_prt(isp, ISP_LOGINFO, "Port Database Changed");
959 		} else if (arg == ISPASYNC_CHANGE_SNS) {
960 			isp_prt(isp, ISP_LOGINFO,
961 			    "Name Server Database Changed");
962 		}
963 
964 		/*
965 		 * We can set blocked here because we know it's now okay
966 		 * to try and run isp_fc_runstate (in order to build loop
967 		 * state). But we don't try and freeze the midlayer's queue
968 		 * if we have no thread that we can wake to later unfreeze
969 		 * it.
970 		 */
971 		if (isp->isp_osinfo.blocked == 0) {
972 			isp->isp_osinfo.blocked = 1;
973 			if (isp->isp_osinfo.thread) {
974 				scsipi_channel_freeze(&isp->isp_chanA, 1);
975 			}
976 		}
977 		/*
978 		 * Note that we have work for the thread to do, and
979 		 * if the thread is here already, wake it up.
980 		 */
981 		isp->isp_osinfo.threadwork++;
982 		if (isp->isp_osinfo.thread) {
983 			wakeup(&isp->isp_osinfo.thread);
984 		} else {
985 			isp_prt(isp, ISP_LOGDEBUG1, "no FC thread yet");
986 		}
987 		break;
988 	case ISPASYNC_FABRIC_DEV:
989 	{
990 		int target, base, lim;
991 		fcparam *fcp = isp->isp_param;
992 		struct lportdb *lp = NULL;
993 		struct lportdb *clp = (struct lportdb *) arg;
994 		char *pt;
995 
996 		switch (clp->port_type) {
997 		case 1:
998 			pt = "   N_Port";
999 			break;
1000 		case 2:
1001 			pt = "  NL_Port";
1002 			break;
1003 		case 3:
1004 			pt = "F/NL_Port";
1005 			break;
1006 		case 0x7f:
1007 			pt = "  Nx_Port";
1008 			break;
1009 		case 0x81:
1010 			pt = "  F_port";
1011 			break;
1012 		case 0x82:
1013 			pt = "  FL_Port";
1014 			break;
1015 		case 0x84:
1016 			pt = "   E_port";
1017 			break;
1018 		default:
1019 			pt = " ";
1020 			break;
1021 		}
1022 
1023 		isp_prt(isp, ISP_LOGINFO,
1024 		    "%s Fabric Device @ PortID 0x%x", pt, clp->portid);
1025 
1026 		/*
1027 		 * If we don't have an initiator role we bail.
1028 		 *
1029 		 * We just use ISPASYNC_FABRIC_DEV for announcement purposes.
1030 		 */
1031 
1032 		if ((isp->isp_role & ISP_ROLE_INITIATOR) == 0) {
1033 			break;
1034 		}
1035 
1036 		/*
1037 		 * Is this entry for us? If so, we bail.
1038 		 */
1039 
1040 		if (fcp->isp_portid == clp->portid) {
1041 			break;
1042 		}
1043 
1044 		/*
1045 		 * Else, the default policy is to find room for it in
1046 		 * our local port database. Later, when we execute
1047 		 * the call to isp_pdb_sync either this newly arrived
1048 		 * or already logged in device will be (re)announced.
1049 		 */
1050 
1051 		if (fcp->isp_topo == TOPO_FL_PORT)
1052 			base = FC_SNS_ID+1;
1053 		else
1054 			base = 0;
1055 
1056 		if (fcp->isp_topo == TOPO_N_PORT)
1057 			lim = 1;
1058 		else
1059 			lim = MAX_FC_TARG;
1060 
1061 		/*
1062 		 * Is it already in our list?
1063 		 */
1064 		for (target = base; target < lim; target++) {
1065 			if (target >= FL_PORT_ID && target <= FC_SNS_ID) {
1066 				continue;
1067 			}
1068 			lp = &fcp->portdb[target];
1069 			if (lp->port_wwn == clp->port_wwn &&
1070 			    lp->node_wwn == clp->node_wwn) {
1071 				lp->fabric_dev = 1;
1072 				break;
1073 			}
1074 		}
1075 		if (target < lim) {
1076 			break;
1077 		}
1078 		for (target = base; target < lim; target++) {
1079 			if (target >= FL_PORT_ID && target <= FC_SNS_ID) {
1080 				continue;
1081 			}
1082 			lp = &fcp->portdb[target];
1083 			if (lp->port_wwn == 0) {
1084 				break;
1085 			}
1086 		}
1087 		if (target == lim) {
1088 			isp_prt(isp, ISP_LOGWARN,
1089 			    "out of space for fabric devices");
1090 			break;
1091 		}
1092 		lp->port_type = clp->port_type;
1093 		lp->fc4_type = clp->fc4_type;
1094 		lp->node_wwn = clp->node_wwn;
1095 		lp->port_wwn = clp->port_wwn;
1096 		lp->portid = clp->portid;
1097 		lp->fabric_dev = 1;
1098 		break;
1099 	}
1100 	case ISPASYNC_FW_CRASH:
1101 	{
1102 		u_int16_t mbox1, mbox6;
1103 		mbox1 = ISP_READ(isp, OUTMAILBOX1);
1104 		if (IS_DUALBUS(isp)) {
1105 			mbox6 = ISP_READ(isp, OUTMAILBOX6);
1106 		} else {
1107 			mbox6 = 0;
1108 		}
1109 		isp_prt(isp, ISP_LOGERR,
1110 		    "Internal Firmware Error on bus %d @ RISC Address 0x%x",
1111 		    mbox6, mbox1);
1112 #ifdef	ISP_FW_CRASH_DUMP
1113 		if (IS_FC(isp)) {
1114 			if (isp->isp_osinfo.blocked == 0) {
1115 				isp->isp_osinfo.blocked = 1;
1116 				scsipi_channel_freeze(&isp->isp_chanA, 1);
1117 			}
1118 			isp_fw_dump(isp);
1119 		}
1120 		isp_reinit(isp);
1121 		isp_async(isp, ISPASYNC_FW_RESTART, NULL);
1122 #endif
1123 		break;
1124 	}
1125 	default:
1126 		break;
1127 	}
1128 	return (0);
1129 }
1130 
1131 #include <machine/stdarg.h>
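/*
 * Print a newline-terminated message, gated on the instance debug level
 * unless ISP_LOGALL is specified.
 */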
1132 void
1133 isp_prt(struct ispsoftc *isp, int level, const char *fmt, ...)
1134 {
1135 	va_list ap;
1136 	if (level != ISP_LOGALL && (level & isp->isp_dblev) == 0) {
1137 		return;
1138 	}
1139 	printf("%s: ", isp->isp_name);
1140 	va_start(ap, fmt);
1141 	vprintf(fmt, ap);
1142 	va_end(ap);
1143 	printf("\n");
1144 }
1145