xref: /netbsd-src/sys/dev/ic/isp_netbsd.c (revision 17306b8fd0952c7489f93f0230818481e5a1e2c9)
1 /* $NetBSD: isp_netbsd.c,v 1.44 2001/05/25 21:45:55 mjacob Exp $ */
2 /*
3  * This driver, which is contained in NetBSD in the files:
4  *
5  *	sys/dev/ic/isp.c
6  *	sys/dev/ic/isp_inline.h
7  *	sys/dev/ic/isp_netbsd.c
8  *	sys/dev/ic/isp_netbsd.h
9  *	sys/dev/ic/isp_target.c
10  *	sys/dev/ic/isp_target.h
11  *	sys/dev/ic/isp_tpublic.h
12  *	sys/dev/ic/ispmbox.h
13  *	sys/dev/ic/ispreg.h
14  *	sys/dev/ic/ispvar.h
15  *	sys/microcode/isp/asm_sbus.h
16  *	sys/microcode/isp/asm_1040.h
17  *	sys/microcode/isp/asm_1080.h
18  *	sys/microcode/isp/asm_12160.h
19  *	sys/microcode/isp/asm_2100.h
20  *	sys/microcode/isp/asm_2200.h
21  *	sys/pci/isp_pci.c
22  *	sys/sbus/isp_sbus.c
23  *
24  * is being actively maintained by Matthew Jacob (mjacob@netbsd.org).
25  * This driver is also shared source with the FreeBSD, OpenBSD, Linux, and
26  * Solaris versions. This tends to be an interesting maintenance problem.
27  *
28  * Please coordinate with Matthew Jacob on changes you wish to make here.
29  */
30 /*
31  * Platform (NetBSD) dependent common attachment code for Qlogic adapters.
32  * Matthew Jacob <mjacob@nas.nasa.gov>
33  */
34 /*
35  * Copyright (C) 1997, 1998, 1999 National Aeronautics & Space Administration
36  * All rights reserved.
37  *
38  * Redistribution and use in source and binary forms, with or without
39  * modification, are permitted provided that the following conditions
40  * are met:
41  * 1. Redistributions of source code must retain the above copyright
42  *    notice, this list of conditions and the following disclaimer.
43  * 2. Redistributions in binary form must reproduce the above copyright
44  *    notice, this list of conditions and the following disclaimer in the
45  *    documentation and/or other materials provided with the distribution.
46  * 3. The name of the author may not be used to endorse or promote products
47  *    derived from this software without specific prior written permission
48  *
49  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
50  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
51  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
52  * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
53  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
54  * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
55  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
56  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
57  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
58  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
59  */
60 
61 #include <dev/ic/isp_netbsd.h>
62 #include <sys/scsiio.h>
63 
64 
65 /*
66  * Set a timeout for the watchdogging of a command.
67  *
68  * The dimensional analysis is
69  *
70  *	milliseconds * (seconds/millisecond) * (ticks/second) = ticks
71  *
72  *			=
73  *
74  *	(milliseconds / 1000) * hz = ticks
75  *
76  *
77  * For timeouts less than 1 second, we'll get zero. Because of this, and
78  * because we want to establish *our* timeout to be longer than what the
79  * firmware might do, we just add 3 seconds at the back end.
80  */
81 #define	_XT(xs)	((((xs)->timeout/1000) * hz) + (3 * hz))
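/*
 * Worked example (assuming the common hz = 100): a 10000ms midlayer timeout
 * becomes (10000 / 1000) * 100 + 3 * 100 = 1300 ticks, i.e. 13 seconds of
 * real time before isp_dog() first fires for that command.
 */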
82 
83 static void ispminphys_1020(struct buf *);
84 static void ispminphys(struct buf *);
85 static INLINE void ispcmd(struct ispsoftc *, XS_T *);
86 static void isprequest(struct scsipi_channel *, scsipi_adapter_req_t, void *);
87 static int
88 ispioctl(struct scsipi_channel *, u_long, caddr_t, int, struct proc *);
89 
90 static void isp_polled_cmd(struct ispsoftc *, XS_T *);
91 static void isp_dog(void *);
92 static void isp_create_fc_worker(void *);
93 static void isp_fc_worker(void *);
94 
95 /*
96  * Complete attachment of hardware, including subdevices.
97  */
98 void
99 isp_attach(struct ispsoftc *isp)
100 {
101 	isp->isp_state = ISP_RUNSTATE;
102 
103 	isp->isp_osinfo._adapter.adapt_dev = &isp->isp_osinfo._dev;
104 	isp->isp_osinfo._adapter.adapt_nchannels = IS_DUALBUS(isp) ? 2 : 1;
105 	isp->isp_osinfo._adapter.adapt_openings = isp->isp_maxcmds;
106 	/*
107 	 * It's not stated whether max_periph is limited by SPI
108 	 * tag usage, but let's assume that it is.
109 	 */
110 	isp->isp_osinfo._adapter.adapt_max_periph = min(isp->isp_maxcmds, 255);
111 	isp->isp_osinfo._adapter.adapt_ioctl = ispioctl;
112 	isp->isp_osinfo._adapter.adapt_request = isprequest;
113 	if (isp->isp_type <= ISP_HA_SCSI_1020A) {
114 		isp->isp_osinfo._adapter.adapt_minphys = ispminphys_1020;
115 	} else {
116 		isp->isp_osinfo._adapter.adapt_minphys = ispminphys;
117 	}
118 
119 	isp->isp_osinfo._chan.chan_adapter = &isp->isp_osinfo._adapter;
120 	isp->isp_osinfo._chan.chan_bustype = &scsi_bustype;
121 	isp->isp_osinfo._chan.chan_channel = 0;
122 
123 	/*
124 	 * Until the midlayer is fixed to use REPORT LUNS, limit to 8 luns.
125 	 */
126 	isp->isp_osinfo._chan.chan_nluns = min(isp->isp_maxluns, 8);
127 
128 	if (IS_FC(isp)) {
129 		isp->isp_osinfo._chan.chan_ntargets = MAX_FC_TARG;
130 		isp->isp_osinfo._chan.chan_id = MAX_FC_TARG;
131 		isp->isp_osinfo.threadwork = 1;
132 		/*
133 		 * Note that isp_create_fc_worker won't get called
134 		 * until much much later (after proc0 is created).
135 		 */
136 		kthread_create(isp_create_fc_worker, isp);
137 	} else {
138 		int bus = 0;
139 		sdparam *sdp = isp->isp_param;
140 
141 		isp->isp_osinfo._chan.chan_ntargets = MAX_TARGETS;
142 		isp->isp_osinfo._chan.chan_id = sdp->isp_initiator_id;
143 		isp->isp_osinfo.discovered[0] = 1 << sdp->isp_initiator_id;
144 		if (IS_DUALBUS(isp)) {
145 			isp->isp_osinfo._chan_b = isp->isp_osinfo._chan;
146 			sdp++;
147 			isp->isp_osinfo.discovered[1] =
148 			    1 << sdp->isp_initiator_id;
149 			isp->isp_osinfo._chan_b.chan_id = sdp->isp_initiator_id;
150 			isp->isp_osinfo._chan_b.chan_channel = 1;
151 		}
152 		ISP_LOCK(isp);
153 		(void) isp_control(isp, ISPCTL_RESET_BUS, &bus);
154 		if (IS_DUALBUS(isp)) {
155 			bus++;
156 			(void) isp_control(isp, ISPCTL_RESET_BUS, &bus);
157 		}
158 		ISP_UNLOCK(isp);
159 	}
160 
161 	/*
162 	 * After this point, we'll be doing the new configuration
163 	 * schema, which allows interrupts, so we can do tsleep/wakeup
164 	 * for mailbox commands.
165 	 */
166 	isp->isp_osinfo.no_mbox_ints = 0;
167 
168 	/*
169 	 * And attach children (if any).
170 	 */
171 	config_found((void *)isp, &isp->isp_chanA, scsiprint);
172 	if (IS_DUALBUS(isp)) {
173 		config_found((void *)isp, &isp->isp_chanB, scsiprint);
174 	}
175 }
176 
177 /*
178  * minphys our xfers
179  */
180 
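/*
 * The 1020A and earlier chips are clamped to 16MB (1 << 24) per transfer,
 * presumably reflecting a 24-bit transfer counter; later chips get a much
 * larger 1GB (1 << 30) clamp.
 */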
181 static void
182 ispminphys_1020(struct buf *bp)
183 {
184 	if (bp->b_bcount >= (1 << 24)) {
185 		bp->b_bcount = (1 << 24);
186 	}
187 	minphys(bp);
188 }
189 
190 static void
191 ispminphys(struct buf *bp)
192 {
193 	if (bp->b_bcount >= (1 << 30)) {
194 		bp->b_bcount = (1 << 30);
195 	}
196 	minphys(bp);
197 }
198 
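/*
 * Adapter ioctl handler: SCSI bus reset, debug level get/set, full HBA
 * reset, FC loop rescan/LIP, and FC port database lookups.
 */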
199 static int
200 ispioctl(struct scsipi_channel *chan, u_long cmd, caddr_t addr, int flag,
201 	struct proc *p)
202 {
203 	struct ispsoftc *isp = (void *)chan->chan_adapter->adapt_dev;
204 	int retval = ENOTTY;
205 
206 	switch (cmd) {
207 	case SCBUSIORESET:
208 		ISP_LOCK(isp);
209 		if (isp_control(isp, ISPCTL_RESET_BUS, &chan->chan_channel))
210 			retval = EIO;
211 		else
212 			retval = 0;
213 		ISP_UNLOCK(isp);
214 		break;
215 	case ISP_SDBLEV:
216 	{
217 		int olddblev = isp->isp_dblev;
218 		isp->isp_dblev = *(int *)addr;
219 		*(int *)addr = olddblev;
220 		retval = 0;
221 		break;
222 	}
223 	case ISP_RESETHBA:
224 		ISP_LOCK(isp);
225 		isp_reinit(isp);
226 		ISP_UNLOCK(isp);
227 		retval = 0;
228 		break;
229 	case ISP_FC_RESCAN:
230 		if (IS_FC(isp)) {
231 			ISP_LOCK(isp);
232 			if (isp_fc_runstate(isp, 5 * 1000000)) {
233 				retval = EIO;
234 			} else {
235 				retval = 0;
236 			}
237 			ISP_UNLOCK(isp);
238 		}
239 		break;
240 	case ISP_FC_LIP:
241 		if (IS_FC(isp)) {
242 			ISP_LOCK(isp);
243 			if (isp_control(isp, ISPCTL_SEND_LIP, 0)) {
244 				retval = EIO;
245 			} else {
246 				retval = 0;
247 			}
248 			ISP_UNLOCK(isp);
249 		}
250 		break;
251 	case ISP_FC_GETDINFO:
252 	{
253 		struct isp_fc_device *ifc = (struct isp_fc_device *) addr;
254 		struct lportdb *lp;
255 
256 		if (ifc->loopid < 0 || ifc->loopid >= MAX_FC_TARG) {
257 			retval = EINVAL;
258 			break;
259 		}
260 		ISP_LOCK(isp);
261 		lp = &FCPARAM(isp)->portdb[ifc->loopid];
262 		if (lp->valid) {
263 			ifc->loopid = lp->loopid;
264 			ifc->portid = lp->portid;
265 			ifc->node_wwn = lp->node_wwn;
266 			ifc->port_wwn = lp->port_wwn;
267 			retval = 0;
268 		} else {
269 			retval = ENODEV;
270 		}
271 		ISP_UNLOCK(isp);
272 		break;
273 	}
274 	default:
275 		break;
276 	}
277 	return (retval);
278 }
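/*
 * Hedged usage sketch (not part of this driver): a userland tool could
 * reach the ISP_FC_RESCAN case above through the scsibus ioctl path.
 * The device node name below is an assumption.
 *
 *	int fd = open("/dev/scsibus0", O_RDWR);
 *	if (fd >= 0 && ioctl(fd, ISP_FC_RESCAN, NULL) == -1)
 *		warn("ISP_FC_RESCAN");
 */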
279 
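/*
 * Start one scsipi transfer: bring the chip to RUNSTATE if needed, cope
 * with FC loop bring-up before the worker thread exists, honor the pause
 * and block states, run polled commands inline, and otherwise hand the
 * command to isp_start() and arm the watchdog callout.
 */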
280 static INLINE void
281 ispcmd(struct ispsoftc *isp, XS_T *xs)
282 {
283 	ISP_LOCK(isp);
284 	if (isp->isp_state < ISP_RUNSTATE) {
285 		DISABLE_INTS(isp);
286 		isp_init(isp);
287 		if (isp->isp_state != ISP_INITSTATE) {
288 			ENABLE_INTS(isp);
289 			ISP_UNLOCK(isp);
290 			XS_SETERR(xs, HBA_BOTCH);
291 			scsipi_done(xs);
292 			return;
293 		}
294 		isp->isp_state = ISP_RUNSTATE;
295 		ENABLE_INTS(isp);
296 	}
297 	/*
298 	 * Handle the case of a FC card where the FC thread hasn't
299 	 * fired up yet and we have loop state to clean up. If we
300 	 * can't clear things up and we've never seen loop up, bounce
301 	 * the command.
302 	 */
303 	if (IS_FC(isp) && isp->isp_osinfo.threadwork &&
304 	    isp->isp_osinfo.thread == 0) {
305 		volatile u_int8_t ombi = isp->isp_osinfo.no_mbox_ints;
306 		int delay_time;
307 
308 		if (xs->xs_control & XS_CTL_POLL) {
309 			isp->isp_osinfo.no_mbox_ints = 1;
310 		}
311 
312 		if (isp->isp_osinfo.loop_checked == 0) {
313 			delay_time = 10 * 1000000;
314 			isp->isp_osinfo.loop_checked = 1;
315 		} else {
316 			delay_time = 250000;
317 		}
318 
319 		if (isp_fc_runstate(isp, delay_time) != 0) {
320 			if (xs->xs_control & XS_CTL_POLL) {
321 				isp->isp_osinfo.no_mbox_ints = ombi;
322 			}
323 			if (FCPARAM(isp)->loop_seen_once == 0) {
324 				XS_SETERR(xs, HBA_SELTIMEOUT);
325 				scsipi_done(xs);
326 				ISP_UNLOCK(isp);
327 				return;
328 			}
329 			/*
330 			 * Otherwise, fall thru to be queued up for later.
331 			 */
332 		} else {
333 			int wasblocked =
334 			    (isp->isp_osinfo.blocked || isp->isp_osinfo.paused);
335 			isp->isp_osinfo.threadwork = 0;
336 			isp->isp_osinfo.blocked =
337 			    isp->isp_osinfo.paused = 0;
338 			isp_prt(isp, ISP_LOGALL, "ispcmd, manual runstate (freeze count %d)", isp->isp_chanA.chan_qfreeze);
339 			if (wasblocked) {
340 				scsipi_channel_thaw(&isp->isp_chanA, 1);
341 			}
342 		}
343 		if (xs->xs_control & XS_CTL_POLL) {
344 			isp->isp_osinfo.no_mbox_ints = ombi;
345 		}
346 	}
347 
348 	if (isp->isp_osinfo.paused) {
349 		isp_prt(isp, ISP_LOGWARN, "I/O while paused");
350 		xs->error = XS_RESOURCE_SHORTAGE;
351 		scsipi_done(xs);
352 		ISP_UNLOCK(isp);
353 		return;
354 	}
355 	if (isp->isp_osinfo.blocked) {
356 		isp_prt(isp, ISP_LOGWARN, "I/O while blocked");
357 		xs->error = XS_REQUEUE;
358 		scsipi_done(xs);
359 		ISP_UNLOCK(isp);
360 		return;
361 	}
362 
363 	if (xs->xs_control & XS_CTL_POLL) {
364 		volatile u_int8_t ombi = isp->isp_osinfo.no_mbox_ints;
365 		isp->isp_osinfo.no_mbox_ints = 1;
366 		isp_polled_cmd(isp, xs);
367 		isp->isp_osinfo.no_mbox_ints = ombi;
368 		ISP_UNLOCK(isp);
369 		return;
370 	}
371 
372 	switch (isp_start(xs)) {
373 	case CMD_QUEUED:
374 		if (xs->timeout) {
375 			callout_reset(&xs->xs_callout, _XT(xs), isp_dog, xs);
376 		}
377 		break;
378 	case CMD_EAGAIN:
379 		isp->isp_osinfo.paused = 1;
380 		xs->error = XS_RESOURCE_SHORTAGE;
381 		scsipi_channel_freeze(&isp->isp_chanA, 1);
382 		if (IS_DUALBUS(isp)) {
383 			scsipi_channel_freeze(&isp->isp_chanB, 1);
384 		}
385 		scsipi_done(xs);
386 		break;
387 	case CMD_RQLATER:
388 		/*
389 		 * We can only get RQLATER from FC devices (1 channel only)
390 		 *
391 		 * Also, if we've never seen loop up, bounce the command
392 		 * (somebody has booted with no FC cable connected)
393 		 */
394 		if (FCPARAM(isp)->loop_seen_once == 0) {
395 			XS_SETERR(xs, HBA_SELTIMEOUT);
396 			scsipi_done(xs);
397 			break;
398 		}
399 		if (isp->isp_osinfo.blocked == 0) {
400 			isp->isp_osinfo.blocked = 1;
401 			scsipi_channel_freeze(&isp->isp_chanA, 1);
402 			isp_prt(isp, ISP_LOGALL, "ispcmd, RQLATER (freeze count %d)", isp->isp_chanA.chan_qfreeze);
403 		}
404 		xs->error = XS_REQUEUE;
405 		scsipi_done(xs);
406 		break;
407 	case CMD_COMPLETE:
408 		scsipi_done(xs);
409 		break;
410 	}
411 	ISP_UNLOCK(isp);
412 }
413 
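/*
 * scsipi adapter request entry point: dispatch transfers to ispcmd() and
 * fold negotiated xfer-mode capability bits into the per-target device
 * flags so the core driver pushes the new parameters on its next update.
 */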
414 static void
415 isprequest(struct scsipi_channel *chan, scsipi_adapter_req_t req, void *arg)
416 {
417 	struct ispsoftc *isp = (void *)chan->chan_adapter->adapt_dev;
418 
419 	switch (req) {
420 	case ADAPTER_REQ_RUN_XFER:
421 		ispcmd(isp, (XS_T *) arg);
422 		break;
423 
424 	case ADAPTER_REQ_GROW_RESOURCES:
425 		/* Not supported. */
426 		break;
427 
428 	case ADAPTER_REQ_SET_XFER_MODE:
429 	if (IS_SCSI(isp)) {
430 		struct scsipi_xfer_mode *xm = arg;
431 		int dflags = 0;
432 		sdparam *sdp = SDPARAM(isp);
433 
434 		sdp += chan->chan_channel;
435 		if (xm->xm_mode & PERIPH_CAP_TQING)
436 			dflags |= DPARM_TQING;
437 		if (xm->xm_mode & PERIPH_CAP_WIDE16)
438 			dflags |= DPARM_WIDE;
439 		if (xm->xm_mode & PERIPH_CAP_SYNC)
440 			dflags |= DPARM_SYNC;
441 		ISP_LOCK(isp);
442 		sdp->isp_devparam[xm->xm_target].dev_flags |= dflags;
443 		dflags = sdp->isp_devparam[xm->xm_target].dev_flags;
444 		sdp->isp_devparam[xm->xm_target].dev_update = 1;
445 		isp->isp_update |= (1 << chan->chan_channel);
446 		ISP_UNLOCK(isp);
447 		isp_prt(isp, ISP_LOGDEBUG1,
448 		    "ispioctl: device flags 0x%x for %d.%d.X",
449 		    dflags, chan->chan_channel, xm->xm_target);
450 		break;
451 	}
452 	default:
453 		break;
454 	}
455 }
456 
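/*
 * Run a command to completion by polling isp_intr() directly, for use when
 * sleeping on interrupts isn't possible (e.g. during autoconfiguration or
 * crash dumps).
 */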
457 static void
458 isp_polled_cmd(struct ispsoftc *isp, XS_T *xs)
459 {
460 	int result;
461 	int infinite = 0, mswait;
462 
463 	result = isp_start(xs);
464 
465 	switch (result) {
466 	case CMD_QUEUED:
467 		break;
468 	case CMD_RQLATER:
469 		if (XS_NOERR(xs)) {
470 			xs->error = XS_REQUEUE;
471 		}
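		/* FALLTHROUGH */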
472 	case CMD_EAGAIN:
473 		if (XS_NOERR(xs)) {
474 			xs->error = XS_RESOURCE_SHORTAGE;
475 		}
476 		/* FALLTHROUGH */
477 	case CMD_COMPLETE:
478 		scsipi_done(xs);
479 		return;
480 
481 	}
482 
483 	/*
484 	 * If we can't use interrupts, poll on completion.
485 	 */
486 	if ((mswait = XS_TIME(xs)) == 0)
487 		infinite = 1;
488 
489 	while (mswait || infinite) {
490 		if (isp_intr((void *)isp)) {
491 			if (XS_CMD_DONE_P(xs)) {
492 				break;
493 			}
494 		}
495 		USEC_DELAY(1000);
496 		mswait -= 1;
497 	}
498 
499 	/*
500 	 * If no other error occurred but we didn't finish,
501 	 * something bad happened.
502 	 */
503 	if (XS_CMD_DONE_P(xs) == 0) {
504 		if (isp_control(isp, ISPCTL_ABORT_CMD, xs)) {
505 			isp_reinit(isp);
506 		}
507 		if (XS_NOERR(xs)) {
508 			XS_SETERR(xs, HBA_BOTCH);
509 		}
510 	}
511 	scsipi_done(xs);
512 }
513 
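/*
 * Platform completion callback from the core driver: cancel the watchdog,
 * map QUEUE FULL status to XS_BUSY, thaw paused channels, and hand the
 * finished transfer back to scsipi.
 */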
514 void
515 isp_done(XS_T *xs)
516 {
517 	XS_CMD_S_DONE(xs);
518 	if (XS_CMD_WDOG_P(xs) == 0) {
519 		struct ispsoftc *isp = XS_ISP(xs);
520 		callout_stop(&xs->xs_callout);
521 		if (XS_CMD_GRACE_P(xs)) {
522 			isp_prt(isp, ISP_LOGDEBUG1,
523 			    "finished command on borrowed time");
524 		}
525 		XS_CMD_S_CLEAR(xs);
526 		/*
527 		 * Fixup: if we get a QFULL, we need
528 		 * to set XS_BUSY as the error.
529 		 */
530 		if (xs->status == SCSI_QUEUE_FULL) {
531 			xs->error = XS_BUSY;
532 		}
533 		if (isp->isp_osinfo.paused) {
534 			isp->isp_osinfo.paused = 0;
535 			scsipi_channel_timed_thaw(&isp->isp_chanA);
536 			if (IS_DUALBUS(isp)) {
537 				scsipi_channel_timed_thaw(&isp->isp_chanB);
538 			}
539 		}
540 		scsipi_done(xs);
541 	}
542 }
543 
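/*
 * Per-command watchdog. On the first expiry we push a SYNC_ALL marker,
 * put the command on grace time and re-arm for one more second; if it
 * still hasn't completed by then, we abort it, release its DMA resources
 * and handle, and fail it with XS_TIMEOUT.
 */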
544 static void
545 isp_dog(void *arg)
546 {
547 	XS_T *xs = arg;
548 	struct ispsoftc *isp = XS_ISP(xs);
549 	u_int16_t handle;
550 
551 	ISP_ILOCK(isp);
552 	/*
553 	 * We've decided this command is dead. Make sure we're not trying
554 	 * to kill a command that's already dead by getting its handle
555 	 * and seeing whether it's still alive.
556 	 */
557 	handle = isp_find_handle(isp, xs);
558 	if (handle) {
559 		u_int16_t r, r1, i;
560 
561 		if (XS_CMD_DONE_P(xs)) {
562 			isp_prt(isp, ISP_LOGDEBUG1,
563 			    "watchdog found done cmd (handle 0x%x)", handle);
564 			ISP_IUNLOCK(isp);
565 			return;
566 		}
567 
568 		if (XS_CMD_WDOG_P(xs)) {
569 			isp_prt(isp, ISP_LOGDEBUG1,
570 			    "recursive watchdog (handle 0x%x)", handle);
571 			ISP_IUNLOCK(isp);
572 			return;
573 		}
574 
575 		XS_CMD_S_WDOG(xs);
576 
577 		i = 0;
578 		do {
579 			r = ISP_READ(isp, BIU_ISR);
580 			USEC_DELAY(1);
581 			r1 = ISP_READ(isp, BIU_ISR);
582 		} while (r != r1 && ++i < 1000);
583 
584 		if (INT_PENDING(isp, r) && isp_intr(isp) && XS_CMD_DONE_P(xs)) {
585 			isp_prt(isp, ISP_LOGDEBUG1, "watchdog cleanup (%x, %x)",
586 			    handle, r);
587 			XS_CMD_C_WDOG(xs);
588 			isp_done(xs);
589 		} else if (XS_CMD_GRACE_P(xs)) {
590 			isp_prt(isp, ISP_LOGDEBUG1, "watchdog timeout (%x, %x)",
591 			    handle, r);
592 			/*
593 			 * Make sure the command is *really* dead before we
594 			 * release the handle (and DMA resources) for reuse.
595 			 */
596 			(void) isp_control(isp, ISPCTL_ABORT_CMD, arg);
597 
598 			/*
599 			 * After this point, the command is really dead.
600 			 */
601 			if (XS_XFRLEN(xs)) {
602 				ISP_DMAFREE(isp, xs, handle);
603 			}
604 			isp_destroy_handle(isp, handle);
605 			XS_SETERR(xs, XS_TIMEOUT);
606 			XS_CMD_S_CLEAR(xs);
607 			isp_done(xs);
608 		} else {
609 			u_int16_t iptr, optr;
610 			ispreq_t *mp;
611 			isp_prt(isp, ISP_LOGDEBUG2,
612 			    "possible command timeout (%x, %x)", handle, r);
613 			XS_CMD_C_WDOG(xs);
614 			callout_reset(&xs->xs_callout, hz, isp_dog, xs);
615 			if (isp_getrqentry(isp, &iptr, &optr, (void **) &mp)) {
616 				ISP_UNLOCK(isp);
617 				return;
618 			}
619 			XS_CMD_S_GRACE(xs);
620 			MEMZERO((void *) mp, sizeof (*mp));
621 			mp->req_header.rqs_entry_count = 1;
622 			mp->req_header.rqs_entry_type = RQSTYPE_MARKER;
623 			mp->req_modifier = SYNC_ALL;
624 			mp->req_target = XS_CHANNEL(xs) << 7;
625 			ISP_SWIZZLE_REQUEST(isp, mp);
626 			ISP_ADD_REQUEST(isp, iptr);
627 		}
628 	} else {
629 		isp_prt(isp, ISP_LOGDEBUG0, "watchdog with no command");
630 	}
631 	ISP_IUNLOCK(isp);
632 }
633 
634 /*
635  * Fibre Channel state cleanup thread
636  */
637 static void
638 isp_create_fc_worker(void *arg)
639 {
640 	struct ispsoftc *isp = arg;
641 
642 	if (kthread_create1(isp_fc_worker, isp, &isp->isp_osinfo.thread,
643 	    "%s:fc_thrd", isp->isp_name)) {
644 		isp_prt(isp, ISP_LOGERR, "unable to create FC worker thread");
645 		panic("isp_create_fc_worker");
646 	}
647 
648 }
649 
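/*
 * FC worker thread body: whenever threadwork is posted, retry
 * isp_fc_runstate() until the firmware and loop are ready, thaw the
 * channel we froze while the loop was in flux, then sleep until the
 * next change notification wakes us.
 */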
650 static void
651 isp_fc_worker(void *arg)
652 {
653 	void scsipi_run_queue(struct scsipi_channel *);
654 	struct ispsoftc *isp = arg;
655 
656 	for (;;) {
657 		int s;
658 
659 		/*
660 		 * Note we do *not* use the ISP_LOCK/ISP_UNLOCK macros here.
661 		 */
662 		s = splbio();
663 		while (isp->isp_osinfo.threadwork) {
664 			isp->isp_osinfo.threadwork = 0;
665 			if (isp_fc_runstate(isp, 10 * 1000000) == 0) {
666 				break;
667 			}
668 			isp->isp_osinfo.threadwork = 1;
669 			splx(s);
670 			delay(500 * 1000);
671 			s = splbio();
672 		}
673 		if (FCPARAM(isp)->isp_fwstate != FW_READY ||
674 		    FCPARAM(isp)->isp_loopstate != LOOP_READY) {
675 			isp_prt(isp, ISP_LOGINFO, "isp_fc_runstate in vain");
676 			isp->isp_osinfo.threadwork = 1;
677 			splx(s);
678 			continue;
679 		}
680 
681 		if (isp->isp_osinfo.blocked) {
682 			isp->isp_osinfo.blocked = 0;
683 			isp_prt(isp, /* ISP_LOGDEBUG0 */ ISP_LOGALL, "restarting queues (freeze count %d)", isp->isp_chanA.chan_qfreeze);
684 
685 			scsipi_channel_thaw(&isp->isp_chanA, 1);
686 		}
687 
688 		if (isp->isp_osinfo.thread == NULL)
689 			break;
690 
691 		(void) tsleep(&isp->isp_osinfo.thread, PRIBIO, "fcclnup", 0);
692 
693 		splx(s);
694 	}
695 
696 	/* In case parent is waiting for us to exit. */
697 	wakeup(&isp->isp_osinfo.thread);
698 
699 	kthread_exit(0);
700 }
701 
702 /*
703  * Free any associated resources prior to decommissioning and
704  * set the card to a known state (so it doesn't wake up and kick
705  * us when we aren't expecting it to).
706  *
707  * Locks are held before coming here.
708  */
709 void
710 isp_uninit(struct ispsoftc *isp)
711 {
712 	isp_lock(isp);
713 	/*
714 	 * Leave with interrupts disabled.
715 	 */
716 	DISABLE_INTS(isp);
717 	isp_unlock(isp);
718 }
719 
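/*
 * Async event hook called by the core driver: report negotiated SPI
 * transfer modes and bus resets to scsipi, freeze the queue and post work
 * for FC loop transitions, and record fabric devices reported by the name
 * server.
 */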
720 int
721 isp_async(struct ispsoftc *isp, ispasync_t cmd, void *arg)
722 {
723 	int bus, tgt;
724 
725 	switch (cmd) {
726 	case ISPASYNC_NEW_TGT_PARAMS:
727 	if (IS_SCSI(isp) && isp->isp_dblev) {
728 		sdparam *sdp = isp->isp_param;
729 		int flags;
730 		struct scsipi_xfer_mode xm;
731 
732 		tgt = *((int *) arg);
733 		bus = (tgt >> 16) & 0xffff;
734 		tgt &= 0xffff;
735 		sdp += bus;
736 		flags = sdp->isp_devparam[tgt].cur_dflags;
737 
738 		xm.xm_mode = 0;
739 		xm.xm_period = sdp->isp_devparam[tgt].cur_period;
740 		xm.xm_offset = sdp->isp_devparam[tgt].cur_offset;
741 		xm.xm_target = tgt;
742 
743 		if ((flags & DPARM_SYNC) && xm.xm_period && xm.xm_offset)
744 			xm.xm_mode |= PERIPH_CAP_SYNC;
745 		if (flags & DPARM_WIDE)
746 			xm.xm_mode |= PERIPH_CAP_WIDE16;
747 		if (flags & DPARM_TQING)
748 			xm.xm_mode |= PERIPH_CAP_TQING;
749 		scsipi_async_event(bus? &isp->isp_chanB : &isp->isp_chanA,
750 		    ASYNC_EVENT_XFER_MODE, &xm);
751 		break;
752 	}
753 	case ISPASYNC_BUS_RESET:
754 		bus = *((int *) arg);
755 		scsipi_async_event(bus? &isp->isp_chanB : &isp->isp_chanA,
756 		    ASYNC_EVENT_RESET, NULL);
757 		isp_prt(isp, ISP_LOGINFO, "SCSI bus %d reset detected", bus);
758 		break;
759 	case ISPASYNC_LIP:
760 		/*
761 		 * Don't do queue freezes or blockage until we have the
762 		 * thread running that can unfreeze/unblock us.
763 		 */
764 		if (isp->isp_osinfo.blocked == 0)  {
765 			if (isp->isp_osinfo.thread) {
766 				isp->isp_osinfo.blocked = 1;
767 				scsipi_channel_freeze(&isp->isp_chanA, 1);
768 			}
769 		}
770 		isp_prt(isp, ISP_LOGINFO, "LIP Received");
771 		break;
772 	case ISPASYNC_LOOP_RESET:
773 		/*
774 		 * Don't do queue freezes or blockage until we have the
775 		 * thread running that can unfreeze/unblock us.
776 		 */
777 		if (isp->isp_osinfo.blocked == 0) {
778 			if (isp->isp_osinfo.thread) {
779 				isp->isp_osinfo.blocked = 1;
780 				scsipi_channel_freeze(&isp->isp_chanA, 1);
781 			}
782 		}
783 		isp_prt(isp, ISP_LOGINFO, "Loop Reset Received");
784 		break;
785 	case ISPASYNC_LOOP_DOWN:
786 		/*
787 		 * Don't do queue freezes or blockage until we have the
788 		 * thread running that can unfreeze/unblock us.
789 		 */
790 		if (isp->isp_osinfo.blocked == 0) {
791 			if (isp->isp_osinfo.thread) {
792 				isp->isp_osinfo.blocked = 1;
793 				scsipi_channel_freeze(&isp->isp_chanA, 1);
794 			}
795 		}
796 		isp_prt(isp, ISP_LOGINFO, "Loop DOWN");
797 		break;
798 	case ISPASYNC_LOOP_UP:
799 		/*
800 		 * Let the subsequent ISPASYNC_CHANGE_NOTIFY invoke
801 		 * the FC worker thread. When the FC worker thread
802 		 * is done, let *it* call scsipi_channel_thaw...
803 		 */
804 		isp_prt(isp, ISP_LOGINFO, "Loop UP");
805 		break;
806 	case ISPASYNC_PROMENADE:
807 	if (IS_FC(isp) && isp->isp_dblev) {
808 		const char fmt[] = "Target %d (Loop 0x%x) Port ID 0x%x "
809 		    "(role %s) %s\n Port WWN 0x%08x%08x\n Node WWN 0x%08x%08x";
810 		const static char *roles[4] = {
811 		    "None", "Target", "Initiator", "Target/Initiator"
812 		};
813 		fcparam *fcp = isp->isp_param;
814 		int tgt = *((int *) arg);
815 		struct lportdb *lp = &fcp->portdb[tgt];
816 
817 		isp_prt(isp, ISP_LOGINFO, fmt, tgt, lp->loopid, lp->portid,
818 		    roles[lp->roles & 0x3],
819 		    (lp->valid)? "Arrived" : "Departed",
820 		    (u_int32_t) (lp->port_wwn >> 32),
821 		    (u_int32_t) (lp->port_wwn & 0xffffffffLL),
822 		    (u_int32_t) (lp->node_wwn >> 32),
823 		    (u_int32_t) (lp->node_wwn & 0xffffffffLL));
824 		break;
825 	}
826 	case ISPASYNC_CHANGE_NOTIFY:
827 		if (arg == ISPASYNC_CHANGE_PDB) {
828 			isp_prt(isp, ISP_LOGINFO, "Port Database Changed");
829 		} else if (arg == ISPASYNC_CHANGE_SNS) {
830 			isp_prt(isp, ISP_LOGINFO,
831 			    "Name Server Database Changed");
832 		}
833 
834 		/*
835 		 * We can set blocked here because we know it's now okay
836 		 * to try and run isp_fc_runstate (in order to build loop
837 		 * state). But we don't try and freeze the midlayer's queue
838 		 * if we have no thread that we can wake to later unfreeze
839 		 * it.
840 		 */
841 		if (isp->isp_osinfo.blocked == 0) {
842 			isp->isp_osinfo.blocked = 1;
843 			if (isp->isp_osinfo.thread) {
844 				scsipi_channel_freeze(&isp->isp_chanA, 1);
845 			}
846 		}
847 		/*
848 		 * Note that we have work for the thread to do, and
849 		 * if the thread is here already, wake it up.
850 		 */
851 		isp->isp_osinfo.threadwork++;
852 		if (isp->isp_osinfo.thread) {
853 			wakeup(&isp->isp_osinfo.thread);
854 		} else {
855 			isp_prt(isp, ISP_LOGDEBUG1, "no FC thread yet");
856 		}
857 		break;
858 	case ISPASYNC_FABRIC_DEV:
859 	{
860 		int target, lrange;
861 		struct lportdb *lp = NULL;
862 		char *pt;
863 		sns_ganrsp_t *resp = (sns_ganrsp_t *) arg;
864 		u_int32_t portid;
865 		u_int64_t wwpn, wwnn;
866 		fcparam *fcp = isp->isp_param;
867 
868 		portid =
869 		    (((u_int32_t) resp->snscb_port_id[0]) << 16) |
870 		    (((u_int32_t) resp->snscb_port_id[1]) << 8) |
871 		    (((u_int32_t) resp->snscb_port_id[2]));
872 
873 		wwpn =
874 		    (((u_int64_t)resp->snscb_portname[0]) << 56) |
875 		    (((u_int64_t)resp->snscb_portname[1]) << 48) |
876 		    (((u_int64_t)resp->snscb_portname[2]) << 40) |
877 		    (((u_int64_t)resp->snscb_portname[3]) << 32) |
878 		    (((u_int64_t)resp->snscb_portname[4]) << 24) |
879 		    (((u_int64_t)resp->snscb_portname[5]) << 16) |
880 		    (((u_int64_t)resp->snscb_portname[6]) <<  8) |
881 		    (((u_int64_t)resp->snscb_portname[7]));
882 
883 		wwnn =
884 		    (((u_int64_t)resp->snscb_nodename[0]) << 56) |
885 		    (((u_int64_t)resp->snscb_nodename[1]) << 48) |
886 		    (((u_int64_t)resp->snscb_nodename[2]) << 40) |
887 		    (((u_int64_t)resp->snscb_nodename[3]) << 32) |
888 		    (((u_int64_t)resp->snscb_nodename[4]) << 24) |
889 		    (((u_int64_t)resp->snscb_nodename[5]) << 16) |
890 		    (((u_int64_t)resp->snscb_nodename[6]) <<  8) |
891 		    (((u_int64_t)resp->snscb_nodename[7]));
892 		if (portid == 0 || wwpn == 0) {
893 			break;
894 		}
895 
896 		switch (resp->snscb_port_type) {
897 		case 1:
898 			pt = "   N_Port";
899 			break;
900 		case 2:
901 			pt = "  NL_Port";
902 			break;
903 		case 3:
904 			pt = "F/NL_Port";
905 			break;
906 		case 0x7f:
907 			pt = "  Nx_Port";
908 			break;
909 		case 0x81:
910 			pt = "  F_port";
911 			break;
912 		case 0x82:
913 			pt = "  FL_Port";
914 			break;
915 		case 0x84:
916 			pt = "   E_port";
917 			break;
918 		default:
919 			pt = "?";
920 			break;
921 		}
922 		isp_prt(isp, ISP_LOGINFO,
923 		    "%s @ 0x%x, Node 0x%08x%08x Port %08x%08x",
924 		    pt, portid, ((u_int32_t) (wwnn >> 32)), ((u_int32_t) wwnn),
925 		    ((u_int32_t) (wwpn >> 32)), ((u_int32_t) wwpn));
926 		/*
927 		 * We're only interested in SCSI_FCP types (for now)
928 		 */
929 		if ((resp->snscb_fc4_types[2] & 1) == 0) {
930 			break;
931 		}
932 		if (fcp->isp_topo != TOPO_F_PORT)
933 			lrange = FC_SNS_ID+1;
934 		else
935 			lrange = 0;
936 		/*
937 		 * Is it already in our list?
938 		 */
939 		for (target = lrange; target < MAX_FC_TARG; target++) {
940 			if (target >= FL_PORT_ID && target <= FC_SNS_ID) {
941 				continue;
942 			}
943 			lp = &fcp->portdb[target];
944 			if (lp->port_wwn == wwpn && lp->node_wwn == wwnn) {
945 				lp->fabric_dev = 1;
946 				break;
947 			}
948 		}
949 		if (target < MAX_FC_TARG) {
950 			break;
951 		}
952 		for (target = lrange; target < MAX_FC_TARG; target++) {
953 			if (target >= FL_PORT_ID && target <= FC_SNS_ID) {
954 				continue;
955 			}
956 			lp = &fcp->portdb[target];
957 			if (lp->port_wwn == 0) {
958 				break;
959 			}
960 		}
961 		if (target == MAX_FC_TARG) {
962 			isp_prt(isp, ISP_LOGWARN,
963 			    "no more space for fabric devices");
964 			break;
965 		}
966 		lp->node_wwn = wwnn;
967 		lp->port_wwn = wwpn;
968 		lp->portid = portid;
969 		lp->fabric_dev = 1;
970 		break;
971 	}
972 	default:
973 		break;
974 	}
975 	return (0);
976 }
977 
978 #include <machine/stdarg.h>
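/*
 * Logging helper: print to the console when the message level is enabled
 * in isp_dblev (ISP_LOGALL messages always print).
 */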
979 void
980 isp_prt(struct ispsoftc *isp, int level, const char *fmt, ...)
981 {
982 	va_list ap;
983 	if (level != ISP_LOGALL && (level & isp->isp_dblev) == 0) {
984 		return;
985 	}
986 	printf("%s: ", isp->isp_name);
987 	va_start(ap, fmt);
988 	vprintf(fmt, ap);
989 	va_end(ap);
990 	printf("\n");
991 }
992