1 /* $NetBSD: isp_netbsd.c,v 1.60 2003/08/07 01:10:53 mjacob Exp $ */
2 /*
3  * This driver, which is contained in NetBSD in the files:
4  *
5  *	sys/dev/ic/isp.c
6  *	sys/dev/ic/isp_inline.h
7  *	sys/dev/ic/isp_netbsd.c
8  *	sys/dev/ic/isp_netbsd.h
9  *	sys/dev/ic/isp_target.c
10  *	sys/dev/ic/isp_target.h
11  *	sys/dev/ic/isp_tpublic.h
12  *	sys/dev/ic/ispmbox.h
13  *	sys/dev/ic/ispreg.h
14  *	sys/dev/ic/ispvar.h
15  *	sys/microcode/isp/asm_sbus.h
16  *	sys/microcode/isp/asm_1040.h
17  *	sys/microcode/isp/asm_1080.h
18  *	sys/microcode/isp/asm_12160.h
19  *	sys/microcode/isp/asm_2100.h
20  *	sys/microcode/isp/asm_2200.h
21  *	sys/pci/isp_pci.c
22  *	sys/sbus/isp_sbus.c
23  *
24  * is being actively maintained by Matthew Jacob (mjacob@netbsd.org).
25  * This driver also shares source with the FreeBSD, OpenBSD, Linux, and
26  * Solaris versions. This tends to be an interesting maintenance problem.
27  *
28  * Please coordinate with Matthew Jacob on changes you wish to make here.
29  */
30 /*
31  * Platform (NetBSD) dependent common attachment code for Qlogic adapters.
32  * Matthew Jacob <mjacob@nas.nasa.gov>
33  */
34 /*
35  * Copyright (C) 1997, 1998, 1999 National Aeronautics & Space Administration
36  * All rights reserved.
37  *
38  * Redistribution and use in source and binary forms, with or without
39  * modification, are permitted provided that the following conditions
40  * are met:
41  * 1. Redistributions of source code must retain the above copyright
42  *    notice, this list of conditions and the following disclaimer.
43  * 2. Redistributions in binary form must reproduce the above copyright
44  *    notice, this list of conditions and the following disclaimer in the
45  *    documentation and/or other materials provided with the distribution.
46  * 3. The name of the author may not be used to endorse or promote products
47  *    derived from this software without specific prior written permission
48  *
49  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
50  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
51  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
52  * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
53  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
54  * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
55  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
56  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
57  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
58  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
59  */
60 
61 #include <sys/cdefs.h>
62 __KERNEL_RCSID(0, "$NetBSD: isp_netbsd.c,v 1.60 2003/08/07 01:10:53 mjacob Exp $");
63 
64 #include <dev/ic/isp_netbsd.h>
65 #include <sys/scsiio.h>
66 
67 
68 /*
69  * Set a timeout for the watchdogging of a command.
70  *
71  * The dimensional analysis is
72  *
73  *	milliseconds * (seconds/millisecond) * (ticks/second) = ticks
74  *
75  *			=
76  *
77  *	(milliseconds / 1000) * hz = ticks
78  *
79  *
80  * For timeouts less than 1 second, we'll get zero. Because of this, and
81  * because we want to establish *our* timeout to be longer than what the
82  * firmware might do, we just add 3 seconds at the back end.
83  */
84 #define	_XT(xs)	((((xs)->timeout/1000) * hz) + (3 * hz))
85 
86 static void isp_config_interrupts(struct device *);
87 static void ispminphys_1020(struct buf *);
88 static void ispminphys(struct buf *);
89 static INLINE void ispcmd(struct ispsoftc *, XS_T *);
90 static void isprequest(struct scsipi_channel *, scsipi_adapter_req_t, void *);
91 static int
92 ispioctl(struct scsipi_channel *, u_long, caddr_t, int, struct proc *);
93 
94 static void isp_polled_cmd(struct ispsoftc *, XS_T *);
95 static void isp_dog(void *);
96 static void isp_create_fc_worker(void *);
97 static void isp_fc_worker(void *);
98 
99 /*
100  * Complete attachment of hardware, including subdevices.
101  */
102 void
103 isp_attach(struct ispsoftc *isp)
104 {
105 	isp->isp_state = ISP_RUNSTATE;
106 
107 	isp->isp_osinfo._adapter.adapt_dev = &isp->isp_osinfo._dev;
108 	isp->isp_osinfo._adapter.adapt_nchannels = IS_DUALBUS(isp) ? 2 : 1;
109 	isp->isp_osinfo._adapter.adapt_openings = isp->isp_maxcmds;
110 	/*
111 	 * It's not stated whether max_periph is limited by SPI
112 	 * tag usage, but let's assume that it is.
113 	 */
114 	isp->isp_osinfo._adapter.adapt_max_periph = min(isp->isp_maxcmds, 255);
115 	isp->isp_osinfo._adapter.adapt_ioctl = ispioctl;
116 	isp->isp_osinfo._adapter.adapt_request = isprequest;
117 	if (isp->isp_type <= ISP_HA_SCSI_1020A) {
118 		isp->isp_osinfo._adapter.adapt_minphys = ispminphys_1020;
119 	} else {
120 		isp->isp_osinfo._adapter.adapt_minphys = ispminphys;
121 	}
122 
123 	isp->isp_osinfo._chan.chan_adapter = &isp->isp_osinfo._adapter;
124 	isp->isp_osinfo._chan.chan_bustype = &scsi_bustype;
125 	isp->isp_osinfo._chan.chan_channel = 0;
126 
127 	/*
128 	 * Until the midlayer is fixed to use REPORT LUNS, limit to 8 luns.
129 	 */
130 	isp->isp_osinfo._chan.chan_nluns = min(isp->isp_maxluns, 8);
131 
132 	if (IS_FC(isp)) {
133 		isp->isp_osinfo._chan.chan_flags = SCSIPI_CHAN_NOSETTLE;
134 		isp->isp_osinfo._chan.chan_ntargets = MAX_FC_TARG;
135 		isp->isp_osinfo._chan.chan_id = MAX_FC_TARG;
136 		isp->isp_osinfo.threadwork = 1;
137 		/*
138 		 * Note that isp_create_fc_worker won't get called
139 		 * until much much later (after proc0 is created).
140 		 */
141 		kthread_create(isp_create_fc_worker, isp);
142 #ifdef	ISP_FW_CRASH_DUMP
143 		if (IS_2200(isp)) {
144 			FCPARAM(isp)->isp_dump_data =
145 			    malloc(QLA2200_RISC_IMAGE_DUMP_SIZE, M_DEVBUF,
146 				M_NOWAIT);
147 		} else if (IS_23XX(isp)) {
148 			FCPARAM(isp)->isp_dump_data =
149 			    malloc(QLA2300_RISC_IMAGE_DUMP_SIZE, M_DEVBUF,
150 				M_NOWAIT);
151 		}
152 		if (FCPARAM(isp)->isp_dump_data)
153 			FCPARAM(isp)->isp_dump_data[0] = 0;
154 #endif
155 	} else {
156 		int bus = 0;
157 		sdparam *sdp = isp->isp_param;
158 
159 		isp->isp_osinfo._chan.chan_ntargets = MAX_TARGETS;
160 		isp->isp_osinfo._chan.chan_id = sdp->isp_initiator_id;
161 		isp->isp_osinfo.discovered[0] = 1 << sdp->isp_initiator_id;
162 		if (IS_DUALBUS(isp)) {
163 			isp->isp_osinfo._chan_b = isp->isp_osinfo._chan;
164 			sdp++;
165 			isp->isp_osinfo.discovered[1] =
166 			    1 << sdp->isp_initiator_id;
167 			isp->isp_osinfo._chan_b.chan_id = sdp->isp_initiator_id;
168 			isp->isp_osinfo._chan_b.chan_channel = 1;
169 		}
170 		ISP_LOCK(isp);
171 		(void) isp_control(isp, ISPCTL_RESET_BUS, &bus);
172 		if (IS_DUALBUS(isp)) {
173 			bus++;
174 			(void) isp_control(isp, ISPCTL_RESET_BUS, &bus);
175 		}
176 		ISP_UNLOCK(isp);
177 	}
178 
179 
180 	/*
181 	 * Defer enabling mailbox interrupts until later.
182 	 */
183 	config_interrupts((struct device *) isp, isp_config_interrupts);
184 
185 	/*
186 	 * And attach children (if any).
187 	 */
188 	config_found((void *)isp, &isp->isp_chanA, scsiprint);
189 	if (IS_DUALBUS(isp)) {
190 		config_found((void *)isp, &isp->isp_chanB, scsiprint);
191 	}
192 }
193 
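/*
 * Deferred configuration hook, called once system interrupts are running.
 */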
194 
195 static void
196 isp_config_interrupts(struct device *self)
197 {
198 	struct ispsoftc *isp = (struct ispsoftc *) self;
199 
200 	/*
201 	 * After this point, we'll be running under the new configuration
202 	 * schema, which allows interrupts, so we can use tsleep/wakeup
203 	 * for mailbox completion where that's allowed.
204 	 */
205 	if (IS_FC(isp)) {
206 		isp->isp_osinfo.no_mbox_ints = 0;
207 	}
208 }
209 
210 
211 /*
212  * minphys our xfers
213  */
214 
215 static void
216 ispminphys_1020(struct buf *bp)
217 {
218 	if (bp->b_bcount >= (1 << 24)) {
219 		bp->b_bcount = (1 << 24);
220 	}
221 	minphys(bp);
222 }
223 
224 static void
225 ispminphys(struct buf *bp)
226 {
227 	if (bp->b_bcount >= (1 << 30)) {
228 		bp->b_bcount = (1 << 30);
229 	}
230 	minphys(bp);
231 }
232 
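/*
 * Handle driver-specific ioctls (debug level, resets, rescans, FC
 * device/HBA information, statistics and crash dumps) as well as
 * SCBUSIORESET.
 */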
233 static int
234 ispioctl(struct scsipi_channel *chan, u_long cmd, caddr_t addr, int flag,
235 	struct proc *p)
236 {
237 	struct ispsoftc *isp = (void *)chan->chan_adapter->adapt_dev;
238 	int retval = ENOTTY;
239 
240 	switch (cmd) {
241 #ifdef	ISP_FW_CRASH_DUMP
242 	case ISP_GET_FW_CRASH_DUMP:
243 	{
244 		u_int16_t *ptr = FCPARAM(isp)->isp_dump_data;
245 		size_t sz;
246 
247 		retval = 0;
248 		if (IS_2200(isp))
249 			sz = QLA2200_RISC_IMAGE_DUMP_SIZE;
250 		else
251 			sz = QLA2300_RISC_IMAGE_DUMP_SIZE;
252 		ISP_LOCK(isp);
253 		if (ptr && *ptr) {
254 			void *uaddr = *((void **) addr);
255 			if (copyout(ptr, uaddr, sz)) {
256 				retval = EFAULT;
257 			} else {
258 				*ptr = 0;
259 			}
260 		} else {
261 			retval = ENXIO;
262 		}
263 		ISP_UNLOCK(isp);
264 		break;
265 	}
266 
267 	case ISP_FORCE_CRASH_DUMP:
268 		ISP_LOCK(isp);
269 		if (isp->isp_osinfo.blocked == 0) {
270 			isp->isp_osinfo.blocked = 1;
271 			scsipi_channel_freeze(&isp->isp_chanA, 1);
272 		}
273 		isp_fw_dump(isp);
274 		isp_reinit(isp);
275 		ISP_UNLOCK(isp);
276 		retval = 0;
277 		break;
278 #endif
279 	case ISP_SDBLEV:
280 	{
281 		int olddblev = isp->isp_dblev;
282 		isp->isp_dblev = *(int *)addr;
283 		*(int *)addr = olddblev;
284 		retval = 0;
285 		break;
286 	}
287 	case ISP_RESETHBA:
288 		ISP_LOCK(isp);
289 		isp_reinit(isp);
290 		ISP_UNLOCK(isp);
291 		retval = 0;
292 		break;
293 	case ISP_RESCAN:
294 		if (IS_FC(isp)) {
295 			ISP_LOCK(isp);
296 			if (isp_fc_runstate(isp, 5 * 1000000)) {
297 				retval = EIO;
298 			} else {
299 				retval = 0;
300 			}
301 			ISP_UNLOCK(isp);
302 		}
303 		break;
304 	case ISP_FC_LIP:
305 		if (IS_FC(isp)) {
306 			ISP_LOCK(isp);
307 			if (isp_control(isp, ISPCTL_SEND_LIP, 0)) {
308 				retval = EIO;
309 			} else {
310 				retval = 0;
311 			}
312 			ISP_UNLOCK(isp);
313 		}
314 		break;
315 	case ISP_FC_GETDINFO:
316 	{
317 		struct isp_fc_device *ifc = (struct isp_fc_device *) addr;
318 		struct lportdb *lp;
319 
320 		if (ifc->loopid < 0 || ifc->loopid >= MAX_FC_TARG) {
321 			retval = EINVAL;
322 			break;
323 		}
324 		ISP_LOCK(isp);
325 		lp = &FCPARAM(isp)->portdb[ifc->loopid];
326 		if (lp->valid) {
327 			ifc->loopid = lp->loopid;
328 			ifc->portid = lp->portid;
329 			ifc->node_wwn = lp->node_wwn;
330 			ifc->port_wwn = lp->port_wwn;
331 			retval = 0;
332 		} else {
333 			retval = ENODEV;
334 		}
335 		ISP_UNLOCK(isp);
336 		break;
337 	}
338 	case ISP_GET_STATS:
339 	{
340 		isp_stats_t *sp = (isp_stats_t *) addr;
341 
342 		MEMZERO(sp, sizeof (*sp));
343 		sp->isp_stat_version = ISP_STATS_VERSION;
344 		sp->isp_type = isp->isp_type;
345 		sp->isp_revision = isp->isp_revision;
346 		ISP_LOCK(isp);
347 		sp->isp_stats[ISP_INTCNT] = isp->isp_intcnt;
348 		sp->isp_stats[ISP_INTBOGUS] = isp->isp_intbogus;
349 		sp->isp_stats[ISP_INTMBOXC] = isp->isp_intmboxc;
350 		sp->isp_stats[ISP_INGOASYNC] = isp->isp_intoasync;
351 		sp->isp_stats[ISP_RSLTCCMPLT] = isp->isp_rsltccmplt;
352 		sp->isp_stats[ISP_FPHCCMCPLT] = isp->isp_fphccmplt;
353 		sp->isp_stats[ISP_RSCCHIWAT] = isp->isp_rscchiwater;
354 		sp->isp_stats[ISP_FPCCHIWAT] = isp->isp_fpcchiwater;
355 		ISP_UNLOCK(isp);
356 		retval = 0;
357 		break;
358 	}
359 	case ISP_CLR_STATS:
360 		ISP_LOCK(isp);
361 		isp->isp_intcnt = 0;
362 		isp->isp_intbogus = 0;
363 		isp->isp_intmboxc = 0;
364 		isp->isp_intoasync = 0;
365 		isp->isp_rsltccmplt = 0;
366 		isp->isp_fphccmplt = 0;
367 		isp->isp_rscchiwater = 0;
368 		isp->isp_fpcchiwater = 0;
369 		ISP_UNLOCK(isp);
370 		retval = 0;
371 		break;
372 	case ISP_FC_GETHINFO:
373 	{
374 		struct isp_hba_device *hba = (struct isp_hba_device *) addr;
375 		MEMZERO(hba, sizeof (*hba));
376 		ISP_LOCK(isp);
377 		hba->fc_speed = FCPARAM(isp)->isp_gbspeed;
378 		hba->fc_scsi_supported = 1;
379 		hba->fc_topology = FCPARAM(isp)->isp_topo + 1;
380 		hba->fc_loopid = FCPARAM(isp)->isp_loopid;
381 		hba->nvram_node_wwn = FCPARAM(isp)->isp_nodewwn;
382 		hba->nvram_port_wwn = FCPARAM(isp)->isp_portwwn;
383 		hba->active_node_wwn = ISP_NODEWWN(isp);
384 		hba->active_port_wwn = ISP_PORTWWN(isp);
385 		ISP_UNLOCK(isp);
		retval = 0;
386 		break;
387 	}
388 	case SCBUSIORESET:
389 		ISP_LOCK(isp);
390 		if (isp_control(isp, ISPCTL_RESET_BUS, &chan->chan_channel))
391 			retval = EIO;
392 		else
393 			retval = 0;
394 		ISP_UNLOCK(isp);
395 		break;
396 	default:
397 		break;
398 	}
399 	return (retval);
400 }
401 
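/*
 * Start a single command: bring the HBA to run state if needed, wait
 * for FC loop state while the FC worker thread isn't up yet, and hand
 * polled commands off to isp_polled_cmd().
 */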
402 static INLINE void
403 ispcmd(struct ispsoftc *isp, XS_T *xs)
404 {
405 	ISP_LOCK(isp);
406 	if (isp->isp_state < ISP_RUNSTATE) {
407 		DISABLE_INTS(isp);
408 		isp_init(isp);
409 		if (isp->isp_state != ISP_INITSTATE) {
410 			ENABLE_INTS(isp);
411 			ISP_UNLOCK(isp);
412 			isp_prt(isp, ISP_LOGERR, "isp not at init state");
413 			XS_SETERR(xs, HBA_BOTCH);
414 			scsipi_done(xs);
415 			return;
416 		}
417 		isp->isp_state = ISP_RUNSTATE;
418 		ENABLE_INTS(isp);
419 	}
420 	/*
421 	 * Handle the case of a FC card where the FC thread hasn't
422 	 * fired up yet and we have loop state to clean up. If we
423 	 * can't clear things up and we've never seen loop up, bounce
424 	 * the command.
425 	 */
426 	if (IS_FC(isp) && isp->isp_osinfo.threadwork &&
427 	    isp->isp_osinfo.thread == 0) {
428 		volatile u_int8_t ombi = isp->isp_osinfo.no_mbox_ints;
429 		int delay_time;
430 
431 		if (xs->xs_control & XS_CTL_POLL) {
432 			isp->isp_osinfo.no_mbox_ints = 1;
433 		}
434 
435 		if (isp->isp_osinfo.loop_checked == 0) {
436 			delay_time = 10 * 1000000;
437 			isp->isp_osinfo.loop_checked = 1;
438 		} else {
439 			delay_time = 250000;
440 		}
441 
442 		if (isp_fc_runstate(isp, delay_time) != 0) {
443 			if (xs->xs_control & XS_CTL_POLL) {
444 				isp->isp_osinfo.no_mbox_ints = ombi;
445 			}
446 			if (FCPARAM(isp)->loop_seen_once == 0) {
447 				XS_SETERR(xs, HBA_SELTIMEOUT);
448 				scsipi_done(xs);
449 				ISP_UNLOCK(isp);
450 				return;
451 			}
452 			/*
453 			 * Otherwise, fall thru to be queued up for later.
454 			 */
455 		} else {
456 			int wasblocked =
457 			    (isp->isp_osinfo.blocked || isp->isp_osinfo.paused);
458 			isp->isp_osinfo.threadwork = 0;
459 			isp->isp_osinfo.blocked =
460 			    isp->isp_osinfo.paused = 0;
461 			if (wasblocked) {
462 				scsipi_channel_thaw(&isp->isp_chanA, 1);
463 			}
464 		}
465 		if (xs->xs_control & XS_CTL_POLL) {
466 			isp->isp_osinfo.no_mbox_ints = ombi;
467 		}
468 	}
469 
470 	if (isp->isp_osinfo.paused) {
471 		isp_prt(isp, ISP_LOGWARN, "I/O while paused");
472 		xs->error = XS_RESOURCE_SHORTAGE;
473 		scsipi_done(xs);
474 		ISP_UNLOCK(isp);
475 		return;
476 	}
477 	if (isp->isp_osinfo.blocked) {
478 		isp_prt(isp, ISP_LOGWARN, "I/O while blocked");
479 		xs->error = XS_REQUEUE;
480 		scsipi_done(xs);
481 		ISP_UNLOCK(isp);
482 		return;
483 	}
484 
485 	if (xs->xs_control & XS_CTL_POLL) {
486 		volatile u_int8_t ombi = isp->isp_osinfo.no_mbox_ints;
487 		isp->isp_osinfo.no_mbox_ints = 1;
488 		isp_polled_cmd(isp, xs);
489 		isp->isp_osinfo.no_mbox_ints = ombi;
490 		ISP_UNLOCK(isp);
491 		return;
492 	}
493 
494 	switch (isp_start(xs)) {
495 	case CMD_QUEUED:
496 		if (xs->timeout) {
497 			callout_reset(&xs->xs_callout, _XT(xs), isp_dog, xs);
498 		}
499 		break;
500 	case CMD_EAGAIN:
501 		isp->isp_osinfo.paused = 1;
502 		xs->error = XS_RESOURCE_SHORTAGE;
503 		scsipi_channel_freeze(&isp->isp_chanA, 1);
504 		if (IS_DUALBUS(isp)) {
505 			scsipi_channel_freeze(&isp->isp_chanB, 1);
506 		}
507 		scsipi_done(xs);
508 		break;
509 	case CMD_RQLATER:
510 		/*
511 		 * We can only get RQLATER from FC devices (1 channel only)
512 		 *
513 		 * Also, if we've never seen loop up, bounce the command
514 		 * (somebody has booted with no FC cable connected)
515 		 */
516 		if (FCPARAM(isp)->loop_seen_once == 0) {
517 			XS_SETERR(xs, HBA_SELTIMEOUT);
518 			scsipi_done(xs);
519 			break;
520 		}
521 		if (isp->isp_osinfo.blocked == 0) {
522 			isp->isp_osinfo.blocked = 1;
523 			scsipi_channel_freeze(&isp->isp_chanA, 1);
524 		}
525 		xs->error = XS_REQUEUE;
526 		scsipi_done(xs);
527 		break;
528 	case CMD_COMPLETE:
529 		scsipi_done(xs);
530 		break;
531 	}
532 	ISP_UNLOCK(isp);
533 }
534 
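/*
 * scsipi adapter request entry point (command submission, resource
 * growth and transfer mode changes).
 */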
535 static void
536 isprequest(struct scsipi_channel *chan, scsipi_adapter_req_t req, void *arg)
537 {
538 	struct ispsoftc *isp = (void *)chan->chan_adapter->adapt_dev;
539 
540 	switch (req) {
541 	case ADAPTER_REQ_RUN_XFER:
542 		ispcmd(isp, (XS_T *) arg);
543 		break;
544 
545 	case ADAPTER_REQ_GROW_RESOURCES:
546 		/* Not supported. */
547 		break;
548 
549 	case ADAPTER_REQ_SET_XFER_MODE:
550 	if (IS_SCSI(isp)) {
551 		struct scsipi_xfer_mode *xm = arg;
552 		int dflags = 0;
553 		sdparam *sdp = SDPARAM(isp);
554 
555 		sdp += chan->chan_channel;
556 		if (xm->xm_mode & PERIPH_CAP_TQING)
557 			dflags |= DPARM_TQING;
558 		if (xm->xm_mode & PERIPH_CAP_WIDE16)
559 			dflags |= DPARM_WIDE;
560 		if (xm->xm_mode & PERIPH_CAP_SYNC)
561 			dflags |= DPARM_SYNC;
562 		ISP_LOCK(isp);
563 		sdp->isp_devparam[xm->xm_target].goal_flags |= dflags;
564 		dflags = sdp->isp_devparam[xm->xm_target].goal_flags;
565 		sdp->isp_devparam[xm->xm_target].dev_update = 1;
566 		isp->isp_update |= (1 << chan->chan_channel);
567 		ISP_UNLOCK(isp);
568 		isp_prt(isp, ISP_LOGDEBUG1,
569 		    "isprequest: device flags 0x%x for %d.%d.X",
570 		    dflags, chan->chan_channel, xm->xm_target);
571 		break;
572 	}
573 	default:
574 		break;
575 	}
576 }
577 
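/*
 * Start a command and busy-wait for it to complete (for use when
 * interrupts can't be relied upon).
 */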
578 static void
579 isp_polled_cmd(struct ispsoftc *isp, XS_T *xs)
580 {
581 	int result;
582 	int infinite = 0, mswait;
583 
584 	result = isp_start(xs);
585 
586 	switch (result) {
587 	case CMD_QUEUED:
588 		break;
589 	case CMD_RQLATER:
590 		if (XS_NOERR(xs)) {
591 			xs->error = XS_REQUEUE;
592 		}
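		/* FALLTHROUGH */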
593 	case CMD_EAGAIN:
594 		if (XS_NOERR(xs)) {
595 			xs->error = XS_RESOURCE_SHORTAGE;
596 		}
597 		/* FALLTHROUGH */
598 	case CMD_COMPLETE:
599 		scsipi_done(xs);
600 		return;
601 
602 	}
603 
604 	/*
605 	 * If we can't use interrupts, poll on completion.
606 	 */
607 	if ((mswait = XS_TIME(xs)) == 0)
608 		infinite = 1;
609 
610 	while (mswait || infinite) {
611 		u_int16_t isr, sema, mbox;
612 		if (ISP_READ_ISR(isp, &isr, &sema, &mbox)) {
613 			isp_intr(isp, isr, sema, mbox);
614 			if (XS_CMD_DONE_P(xs)) {
615 				break;
616 			}
617 		}
618 		USEC_DELAY(1000);
619 		mswait -= 1;
620 	}
621 
622 	/*
623 	 * If no other error occurred but we didn't finish,
624 	 * something bad happened.
625 	 */
626 	if (XS_CMD_DONE_P(xs) == 0) {
627 		if (isp_control(isp, ISPCTL_ABORT_CMD, xs)) {
628 			isp_reinit(isp);
629 		}
630 		if (XS_NOERR(xs)) {
631 			isp_prt(isp, ISP_LOGERR, "polled command timed out");
632 			XS_SETERR(xs, HBA_BOTCH);
633 		}
634 	}
635 	scsipi_done(xs);
636 }
637 
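/*
 * Command completion callback, invoked from the core code.
 */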
638 void
639 isp_done(XS_T *xs)
640 {
641 	XS_CMD_S_DONE(xs);
642 	if (XS_CMD_WDOG_P(xs) == 0) {
643 		struct ispsoftc *isp = XS_ISP(xs);
644 		callout_stop(&xs->xs_callout);
645 		if (XS_CMD_GRACE_P(xs)) {
646 			isp_prt(isp, ISP_LOGDEBUG1,
647 			    "finished command on borrowed time");
648 		}
649 		XS_CMD_S_CLEAR(xs);
650 		/*
651 		 * Fixup- if we get a QFULL, we need
652 		 * to set XS_BUSY as the error.
653 		 */
654 		if (xs->status == SCSI_QUEUE_FULL) {
655 			xs->error = XS_BUSY;
656 		}
657 		if (isp->isp_osinfo.paused) {
658 			isp->isp_osinfo.paused = 0;
659 			scsipi_channel_timed_thaw(&isp->isp_chanA);
660 			if (IS_DUALBUS(isp)) {
661 				scsipi_channel_timed_thaw(&isp->isp_chanB);
662 			}
663 		}
664 		if (xs->error == XS_DRIVER_STUFFUP) {
665 			isp_prt(isp, ISP_LOGERR, "BOTCHED cmd for %d.%d.%d cmd 0x%x datalen %ld",
666 			    XS_CHANNEL(xs), XS_TGT(xs), XS_LUN(xs), XS_CDBP(xs)[0], (long) XS_XFRLEN(xs));
667 		}
668 		scsipi_done(xs);
669 	}
670 }
671 
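/*
 * Per-command watchdog, armed when a command is queued.
 */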
672 static void
673 isp_dog(void *arg)
674 {
675 	XS_T *xs = arg;
676 	struct ispsoftc *isp = XS_ISP(xs);
677 	u_int16_t handle;
678 
679 	ISP_ILOCK(isp);
680 	/*
681 	 * We've decided this command is dead. Make sure we're not trying
682 	 * to kill a command that's already dead by getting its handle
683 	 * and seeing whether it's still alive.
684 	 */
685 	handle = isp_find_handle(isp, xs);
686 	if (handle) {
687 		u_int16_t isr, mbox, sema;
688 
689 		if (XS_CMD_DONE_P(xs)) {
690 			isp_prt(isp, ISP_LOGDEBUG1,
691 			    "watchdog found done cmd (handle 0x%x)", handle);
692 			ISP_IUNLOCK(isp);
693 			return;
694 		}
695 
696 		if (XS_CMD_WDOG_P(xs)) {
697 			isp_prt(isp, ISP_LOGDEBUG1,
698 			    "recursive watchdog (handle 0x%x)", handle);
699 			ISP_IUNLOCK(isp);
700 			return;
701 		}
702 
703 		XS_CMD_S_WDOG(xs);
704 
705 		if (ISP_READ_ISR(isp, &isr, &sema, &mbox)) {
706 			isp_intr(isp, isr, sema, mbox);
707 
708 		}
709 		if (XS_CMD_DONE_P(xs)) {
710 			isp_prt(isp, ISP_LOGDEBUG1,
711 			    "watchdog cleanup for handle 0x%x", handle);
712 			XS_CMD_C_WDOG(xs);
713 			isp_done(xs);
714 		} else if (XS_CMD_GRACE_P(xs)) {
715 			isp_prt(isp, ISP_LOGDEBUG1,
716 			    "watchdog timeout for handle 0x%x", handle);
717 			/*
718 			 * Make sure the command is *really* dead before we
719 			 * release the handle (and DMA resources) for reuse.
720 			 */
721 			(void) isp_control(isp, ISPCTL_ABORT_CMD, arg);
722 
723 			/*
724 			 * After this point, the command is really dead.
725 			 */
726 			if (XS_XFRLEN(xs)) {
727 				ISP_DMAFREE(isp, xs, handle);
728 			}
729 			isp_destroy_handle(isp, handle);
730 			XS_SETERR(xs, XS_TIMEOUT);
731 			XS_CMD_S_CLEAR(xs);
732 			isp_done(xs);
733 		} else {
734 			u_int16_t nxti, optr;
735 			ispreq_t local, *mp = &local, *qe;
736 			isp_prt(isp, ISP_LOGDEBUG2,
737 			    "possible command timeout on handle %x", handle);
738 			XS_CMD_C_WDOG(xs);
739 			callout_reset(&xs->xs_callout, hz, isp_dog, xs);
740 			if (isp_getrqentry(isp, &nxti, &optr, (void *) &qe)) {
741 				ISP_IUNLOCK(isp);
742 				return;
743 			}
744 			XS_CMD_S_GRACE(xs);
745 			MEMZERO((void *) mp, sizeof (*mp));
746 			mp->req_header.rqs_entry_count = 1;
747 			mp->req_header.rqs_entry_type = RQSTYPE_MARKER;
748 			mp->req_modifier = SYNC_ALL;
749 			mp->req_target = XS_CHANNEL(xs) << 7;
750 			isp_put_request(isp, mp, qe);
751 			ISP_ADD_REQUEST(isp, nxti);
752 		}
753 	} else {
754 		isp_prt(isp, ISP_LOGDEBUG0, "watchdog with no command");
755 	}
756 	ISP_IUNLOCK(isp);
757 }
758 
759 /*
760  * Fibre Channel state cleanup thread
761  */
762 static void
763 isp_create_fc_worker(void *arg)
764 {
765 	struct ispsoftc *isp = arg;
766 
767 	if (kthread_create1(isp_fc_worker, isp, &isp->isp_osinfo.thread,
768 	    "%s:fc_thrd", isp->isp_name)) {
769 		isp_prt(isp, ISP_LOGERR, "unable to create FC worker thread");
770 		panic("isp_create_fc_worker");
771 	}
772 
773 }
774 
775 static void
776 isp_fc_worker(void *arg)
777 {
778 	void scsipi_run_queue(struct scsipi_channel *);
779 	struct ispsoftc *isp = arg;
780 
781 	for (;;) {
782 		int s;
783 
784 		/*
785 		 * Note we do *not* use the ISP_LOCK/ISP_UNLOCK macros here.
786 		 */
787 		s = splbio();
788 		while (isp->isp_osinfo.threadwork) {
789 			isp->isp_osinfo.threadwork = 0;
790 			if (isp_fc_runstate(isp, 10 * 1000000) == 0) {
791 				break;
792 			}
793 			if (isp->isp_osinfo.loop_checked &&
794 			     FCPARAM(isp)->loop_seen_once == 0) {
795 				splx(s);
796 				goto skip;
797 			}
798 			isp->isp_osinfo.loop_checked = 1;
799 			isp->isp_osinfo.threadwork = 1;
800 			splx(s);
801 			delay(500 * 1000);
802 			s = splbio();
803 		}
804 		if (FCPARAM(isp)->isp_fwstate != FW_READY ||
805 		    FCPARAM(isp)->isp_loopstate != LOOP_READY) {
806 			isp_prt(isp, ISP_LOGINFO, "isp_fc_runstate in vain");
807 			isp->isp_osinfo.threadwork = 1;
808 			splx(s);
809 			continue;
810 		}
811 
812 		if (isp->isp_osinfo.blocked) {
813 			isp->isp_osinfo.blocked = 0;
814 			isp_prt(isp, ISP_LOGDEBUG0,
815 			    "restarting queues (freeze count %d)",
816 			    isp->isp_chanA.chan_qfreeze);
817 			scsipi_channel_thaw(&isp->isp_chanA, 1);
818 		}
819 
820 		if (isp->isp_osinfo.thread == NULL)
821 			break;
822 
823 skip:
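		/*
		 * Wait until there is more loop state work to do
		 * (or until we are asked to exit).
		 */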
824 		(void) tsleep(&isp->isp_osinfo.thread, PRIBIO, "fcclnup", 0);
825 
826 		splx(s);
827 	}
828 
829 	/* In case parent is waiting for us to exit. */
830 	wakeup(&isp->isp_osinfo.thread);
831 
832 	kthread_exit(0);
833 }
834 
835 /*
836  * Free any associated resources prior to decommissioning and
837  * set the card to a known state (so it doesn't wake up and kick
838  * us when we aren't expecting it to).
839  *
840  * Locks are held before coming here.
841  */
842 void
843 isp_uninit(struct ispsoftc *isp)
844 {
845 	isp_lock(isp);
846 	/*
847 	 * Leave with interrupts disabled.
848 	 */
849 	DISABLE_INTS(isp);
850 	isp_unlock(isp);
851 }
852 
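/*
 * Handle asynchronous events reported by the core code.
 */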
853 int
854 isp_async(struct ispsoftc *isp, ispasync_t cmd, void *arg)
855 {
856 	int bus, tgt;
857 
858 	switch (cmd) {
859 	case ISPASYNC_NEW_TGT_PARAMS:
860 	if (IS_SCSI(isp) && isp->isp_dblev) {
861 		sdparam *sdp = isp->isp_param;
862 		int flags;
863 		struct scsipi_xfer_mode xm;
864 
865 		tgt = *((int *) arg);
866 		bus = (tgt >> 16) & 0xffff;
867 		tgt &= 0xffff;
868 		sdp += bus;
869 		flags = sdp->isp_devparam[tgt].actv_flags;
870 
871 		xm.xm_mode = 0;
872 		xm.xm_period = sdp->isp_devparam[tgt].actv_period;
873 		xm.xm_offset = sdp->isp_devparam[tgt].actv_offset;
874 		xm.xm_target = tgt;
875 
876 		if ((flags & DPARM_SYNC) && xm.xm_period && xm.xm_offset)
877 			xm.xm_mode |= PERIPH_CAP_SYNC;
878 		if (flags & DPARM_WIDE)
879 			xm.xm_mode |= PERIPH_CAP_WIDE16;
880 		if (flags & DPARM_TQING)
881 			xm.xm_mode |= PERIPH_CAP_TQING;
882 		scsipi_async_event(bus? &isp->isp_chanB : &isp->isp_chanA,
883 		    ASYNC_EVENT_XFER_MODE, &xm);
884 		break;
885 	}
886 	case ISPASYNC_BUS_RESET:
887 		bus = *((int *) arg);
888 		scsipi_async_event(bus? &isp->isp_chanB : &isp->isp_chanA,
889 		    ASYNC_EVENT_RESET, NULL);
890 		isp_prt(isp, ISP_LOGINFO, "SCSI bus %d reset detected", bus);
891 		break;
892 	case ISPASYNC_LIP:
893 		/*
894 		 * Don't do queue freezes or blockage until we have the
895 		 * thread running that can unfreeze/unblock us.
896 		 */
897 		if (isp->isp_osinfo.blocked == 0)  {
898 			if (isp->isp_osinfo.thread) {
899 				isp->isp_osinfo.blocked = 1;
900 				scsipi_channel_freeze(&isp->isp_chanA, 1);
901 			}
902 		}
903 		isp_prt(isp, ISP_LOGINFO, "LIP Received");
904 		break;
905 	case ISPASYNC_LOOP_RESET:
906 		/*
907 		 * Don't do queue freezes or blockage until we have the
908 		 * thread running that can unfreeze/unblock us.
909 		 */
910 		if (isp->isp_osinfo.blocked == 0) {
911 			if (isp->isp_osinfo.thread) {
912 				isp->isp_osinfo.blocked = 1;
913 				scsipi_channel_freeze(&isp->isp_chanA, 1);
914 			}
915 		}
916 		isp_prt(isp, ISP_LOGINFO, "Loop Reset Received");
917 		break;
918 	case ISPASYNC_LOOP_DOWN:
919 		/*
920 		 * Don't do queue freezes or blockage until we have the
921 		 * thread running that can unfreeze/unblock us.
922 		 */
923 		if (isp->isp_osinfo.blocked == 0) {
924 			if (isp->isp_osinfo.thread) {
925 				isp->isp_osinfo.blocked = 1;
926 				scsipi_channel_freeze(&isp->isp_chanA, 1);
927 			}
928 		}
929 		isp_prt(isp, ISP_LOGINFO, "Loop DOWN");
930 		break;
931 	case ISPASYNC_LOOP_UP:
932 		/*
933 		 * Let the subsequent ISPASYNC_CHANGE_NOTIFY invoke
934 		 * the FC worker thread. When the FC worker thread
935 		 * is done, let *it* call scsipi_channel_thaw...
936 		 */
937 		isp_prt(isp, ISP_LOGINFO, "Loop UP");
938 		break;
939 	case ISPASYNC_PROMENADE:
940 	if (IS_FC(isp) && isp->isp_dblev) {
941 		static const char fmt[] = "Target %d (Loop 0x%x) Port ID 0x%x "
942 		    "(role %s) %s\n Port WWN 0x%08x%08x\n Node WWN 0x%08x%08x";
943 		static const char *const roles[4] = {
944 		    "None", "Target", "Initiator", "Target/Initiator"
945 		};
946 		fcparam *fcp = isp->isp_param;
947 		int tgt = *((int *) arg);
948 		struct lportdb *lp = &fcp->portdb[tgt];
949 
950 		isp_prt(isp, ISP_LOGINFO, fmt, tgt, lp->loopid, lp->portid,
951 		    roles[lp->roles & 0x3],
952 		    (lp->valid)? "Arrived" : "Departed",
953 		    (u_int32_t) (lp->port_wwn >> 32),
954 		    (u_int32_t) (lp->port_wwn & 0xffffffffLL),
955 		    (u_int32_t) (lp->node_wwn >> 32),
956 		    (u_int32_t) (lp->node_wwn & 0xffffffffLL));
957 		break;
958 	}
959 	case ISPASYNC_CHANGE_NOTIFY:
960 		if (arg == ISPASYNC_CHANGE_PDB) {
961 			isp_prt(isp, ISP_LOGINFO, "Port Database Changed");
962 		} else if (arg == ISPASYNC_CHANGE_SNS) {
963 			isp_prt(isp, ISP_LOGINFO,
964 			    "Name Server Database Changed");
965 		}
966 
967 		/*
968 		 * We can set blocked here because we know it's now okay
969 		 * to try and run isp_fc_runstate (in order to build loop
970 		 * state). But we don't try and freeze the midlayer's queue
971 		 * if we have no thread that we can wake to later unfreeze
972 		 * it.
973 		 */
974 		if (isp->isp_osinfo.blocked == 0) {
975 			isp->isp_osinfo.blocked = 1;
976 			if (isp->isp_osinfo.thread) {
977 				scsipi_channel_freeze(&isp->isp_chanA, 1);
978 			}
979 		}
980 		/*
981 		 * Note that we have work for the thread to do, and
982 		 * if the thread is here already, wake it up.
983 		 */
984 		isp->isp_osinfo.threadwork++;
985 		if (isp->isp_osinfo.thread) {
986 			wakeup(&isp->isp_osinfo.thread);
987 		} else {
988 			isp_prt(isp, ISP_LOGDEBUG1, "no FC thread yet");
989 		}
990 		break;
991 	case ISPASYNC_FABRIC_DEV:
992 	{
993 		int target, base, lim;
994 		fcparam *fcp = isp->isp_param;
995 		struct lportdb *lp = NULL;
996 		struct lportdb *clp = (struct lportdb *) arg;
997 		char *pt;
998 
999 		switch (clp->port_type) {
1000 		case 1:
1001 			pt = "   N_Port";
1002 			break;
1003 		case 2:
1004 			pt = "  NL_Port";
1005 			break;
1006 		case 3:
1007 			pt = "F/NL_Port";
1008 			break;
1009 		case 0x7f:
1010 			pt = "  Nx_Port";
1011 			break;
1012 		case 0x81:
1013 			pt = "   F_Port";
1014 			break;
1015 		case 0x82:
1016 			pt = "  FL_Port";
1017 			break;
1018 		case 0x84:
1019 			pt = "   E_Port";
1020 			break;
1021 		default:
1022 			pt = " ";
1023 			break;
1024 		}
1025 
1026 		isp_prt(isp, ISP_LOGINFO,
1027 		    "%s Fabric Device @ PortID 0x%x", pt, clp->portid);
1028 
1029 		/*
1030 		 * If we don't have an initiator role we bail.
1031 		 *
1032 		 * We just use ISPASYNC_FABRIC_DEV for announcement purposes.
1033 		 */
1034 
1035 		if ((isp->isp_role & ISP_ROLE_INITIATOR) == 0) {
1036 			break;
1037 		}
1038 
1039 		/*
1040 		 * Is this entry for us? If so, we bail.
1041 		 */
1042 
1043 		if (fcp->isp_portid == clp->portid) {
1044 			break;
1045 		}
1046 
1047 		/*
1048 		 * Else, the default policy is to find room for it in
1049 		 * our local port database. Later, when we execute
1050 		 * the call to isp_pdb_sync either this newly arrived
1051 		 * or already logged in device will be (re)announced.
1052 		 */
1053 
1054 		if (fcp->isp_topo == TOPO_FL_PORT)
1055 			base = FC_SNS_ID+1;
1056 		else
1057 			base = 0;
1058 
1059 		if (fcp->isp_topo == TOPO_N_PORT)
1060 			lim = 1;
1061 		else
1062 			lim = MAX_FC_TARG;
1063 
1064 		/*
1065 		 * Is it already in our list?
1066 		 */
1067 		for (target = base; target < lim; target++) {
1068 			if (target >= FL_PORT_ID && target <= FC_SNS_ID) {
1069 				continue;
1070 			}
1071 			lp = &fcp->portdb[target];
1072 			if (lp->port_wwn == clp->port_wwn &&
1073 			    lp->node_wwn == clp->node_wwn) {
1074 				lp->fabric_dev = 1;
1075 				break;
1076 			}
1077 		}
1078 		if (target < lim) {
1079 			break;
1080 		}
1081 		for (target = base; target < lim; target++) {
1082 			if (target >= FL_PORT_ID && target <= FC_SNS_ID) {
1083 				continue;
1084 			}
1085 			lp = &fcp->portdb[target];
1086 			if (lp->port_wwn == 0) {
1087 				break;
1088 			}
1089 		}
1090 		if (target == lim) {
1091 			isp_prt(isp, ISP_LOGWARN,
1092 			    "out of space for fabric devices");
1093 			break;
1094 		}
1095 		lp->port_type = clp->port_type;
1096 		lp->fc4_type = clp->fc4_type;
1097 		lp->node_wwn = clp->node_wwn;
1098 		lp->port_wwn = clp->port_wwn;
1099 		lp->portid = clp->portid;
1100 		lp->fabric_dev = 1;
1101 		break;
1102 	}
1103 	case ISPASYNC_FW_CRASH:
1104 	{
1105 		u_int16_t mbox1, mbox6;
1106 		mbox1 = ISP_READ(isp, OUTMAILBOX1);
1107 		if (IS_DUALBUS(isp)) {
1108 			mbox6 = ISP_READ(isp, OUTMAILBOX6);
1109 		} else {
1110 			mbox6 = 0;
1111 		}
1112 		isp_prt(isp, ISP_LOGERR,
1113 		    "Internal Firmware Error on bus %d @ RISC Address 0x%x",
1114 		    mbox6, mbox1);
1115 #ifdef	ISP_FW_CRASH_DUMP
1116 		if (IS_FC(isp)) {
1117 			if (isp->isp_osinfo.blocked == 0) {
1118 				isp->isp_osinfo.blocked = 1;
1119 				scsipi_channel_freeze(&isp->isp_chanA, 1);
1120 			}
1121 			isp_fw_dump(isp);
1122 		}
1123 		isp_reinit(isp);
1124 		isp_async(isp, ISPASYNC_FW_RESTART, NULL);
1125 #endif
1126 		break;
1127 	}
1128 	default:
1129 		break;
1130 	}
1131 	return (0);
1132 }
1133 
1134 #include <machine/stdarg.h>
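/*
 * Print a driver message, filtered by the current debug level.
 */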
1135 void
1136 isp_prt(struct ispsoftc *isp, int level, const char *fmt, ...)
1137 {
1138 	va_list ap;
1139 	if (level != ISP_LOGALL && (level & isp->isp_dblev) == 0) {
1140 		return;
1141 	}
1142 	printf("%s: ", isp->isp_name);
1143 	va_start(ap, fmt);
1144 	vprintf(fmt, ap);
1145 	va_end(ap);
1146 	printf("\n");
1147 }
1148