1 /* $NetBSD: isp_netbsd.c,v 1.64 2003/12/04 13:57:30 keihan Exp $ */
2 /*
3  * This driver, which is contained in NetBSD in the files:
4  *
5  *	sys/dev/ic/isp.c
6  *	sys/dev/ic/isp_inline.h
7  *	sys/dev/ic/isp_netbsd.c
8  *	sys/dev/ic/isp_netbsd.h
9  *	sys/dev/ic/isp_target.c
10  *	sys/dev/ic/isp_target.h
11  *	sys/dev/ic/isp_tpublic.h
12  *	sys/dev/ic/ispmbox.h
13  *	sys/dev/ic/ispreg.h
14  *	sys/dev/ic/ispvar.h
15  *	sys/microcode/isp/asm_sbus.h
16  *	sys/microcode/isp/asm_1040.h
17  *	sys/microcode/isp/asm_1080.h
18  *	sys/microcode/isp/asm_12160.h
19  *	sys/microcode/isp/asm_2100.h
20  *	sys/microcode/isp/asm_2200.h
21  *	sys/pci/isp_pci.c
22  *	sys/sbus/isp_sbus.c
23  *
24  * Is being actively maintained by Matthew Jacob (mjacob@NetBSD.org).
 25  * This driver is also shared source with FreeBSD, OpenBSD, Linux, and
 26  * Solaris versions. This tends to be an interesting maintenance problem.
27  *
28  * Please coordinate with Matthew Jacob on changes you wish to make here.
29  */
30 /*
31  * Platform (NetBSD) dependent common attachment code for Qlogic adapters.
32  * Matthew Jacob <mjacob@nas.nasa.gov>
33  */
34 /*
35  * Copyright (C) 1997, 1998, 1999 National Aeronautics & Space Administration
36  * All rights reserved.
37  *
38  * Redistribution and use in source and binary forms, with or without
39  * modification, are permitted provided that the following conditions
40  * are met:
41  * 1. Redistributions of source code must retain the above copyright
42  *    notice, this list of conditions and the following disclaimer.
43  * 2. Redistributions in binary form must reproduce the above copyright
44  *    notice, this list of conditions and the following disclaimer in the
45  *    documentation and/or other materials provided with the distribution.
46  * 3. The name of the author may not be used to endorse or promote products
47  *    derived from this software without specific prior written permission
48  *
49  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
50  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
51  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
52  * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
53  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
54  * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
55  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
56  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
57  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
58  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
59  */
60 
61 #include <sys/cdefs.h>
62 __KERNEL_RCSID(0, "$NetBSD: isp_netbsd.c,v 1.64 2003/12/04 13:57:30 keihan Exp $");
63 
64 #include <dev/ic/isp_netbsd.h>
65 #include <sys/scsiio.h>
66 
67 
68 /*
69  * Set a timeout for the watchdogging of a command.
70  *
71  * The dimensional analysis is
72  *
73  *	milliseconds * (seconds/millisecond) * (ticks/second) = ticks
74  *
75  *			=
76  *
77  *	(milliseconds / 1000) * hz = ticks
78  *
79  *
80  * For timeouts less than 1 second, we'll get zero. Because of this, and
81  * because we want to establish *our* timeout to be longer than what the
82  * firmware might do, we just add 3 seconds at the back end.
83  */
84 #define	_XT(xs)	((((xs)->timeout/1000) * hz) + (3 * hz))
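/*
 * A worked example of the macro above (assuming the common case of
 * hz = 100): a midlayer timeout of 250 ms yields (250 / 1000) * 100 = 0
 * ticks from the scaling alone, so the trailing 3 * hz term keeps the
 * watchdog at a full 3 seconds; a 10000 ms timeout yields
 * (10000 / 1000) * 100 + 300 = 1300 ticks, i.e. 13 seconds.
 */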
85 
86 static void isp_config_interrupts(struct device *);
87 static void ispminphys_1020(struct buf *);
88 static void ispminphys(struct buf *);
89 static INLINE void ispcmd(struct ispsoftc *, XS_T *);
90 static void isprequest(struct scsipi_channel *, scsipi_adapter_req_t, void *);
91 static int
92 ispioctl(struct scsipi_channel *, u_long, caddr_t, int, struct proc *);
93 
94 static void isp_polled_cmd(struct ispsoftc *, XS_T *);
95 static void isp_dog(void *);
96 static void isp_create_fc_worker(void *);
97 static void isp_fc_worker(void *);
98 
99 /*
 100  * Complete attachment of hardware, including subdevices.
101  */
102 void
103 isp_attach(struct ispsoftc *isp)
104 {
105 	isp->isp_state = ISP_RUNSTATE;
106 
107 	isp->isp_osinfo._adapter.adapt_dev = &isp->isp_osinfo._dev;
108 	isp->isp_osinfo._adapter.adapt_nchannels = IS_DUALBUS(isp) ? 2 : 1;
109 	isp->isp_osinfo._adapter.adapt_openings = isp->isp_maxcmds;
110 	/*
111 	 * It's not stated whether max_periph is limited by SPI
112 	 * tag uage, but let's assume that it is.
113 	 */
114 	isp->isp_osinfo._adapter.adapt_max_periph = min(isp->isp_maxcmds, 255);
115 	isp->isp_osinfo._adapter.adapt_ioctl = ispioctl;
116 	isp->isp_osinfo._adapter.adapt_request = isprequest;
117 	if (isp->isp_type <= ISP_HA_SCSI_1020A) {
118 		isp->isp_osinfo._adapter.adapt_minphys = ispminphys_1020;
119 	} else {
120 		isp->isp_osinfo._adapter.adapt_minphys = ispminphys;
121 	}
122 
123 	isp->isp_osinfo._chan.chan_adapter = &isp->isp_osinfo._adapter;
124 	isp->isp_osinfo._chan.chan_bustype = &scsi_bustype;
125 	isp->isp_osinfo._chan.chan_channel = 0;
126 
127 	/*
128 	 * Until the midlayer is fixed to use REPORT LUNS, limit to 8 luns.
129 	 */
130 	isp->isp_osinfo._chan.chan_nluns = min(isp->isp_maxluns, 8);
131 
132 	if (IS_FC(isp)) {
133 #if 0	/* XXX channel "settle" time seems to sidestep some nasty race */
 134 		isp->isp_osinfo._chan.chan_flags = SCSIPI_CHAN_NOSETTLE;
135 #endif
136 		isp->isp_osinfo._chan.chan_ntargets = MAX_FC_TARG;
137 		isp->isp_osinfo._chan.chan_id = MAX_FC_TARG;
138 		isp->isp_osinfo.threadwork = 1;
139 		/*
140 		 * Note that isp_create_fc_worker won't get called
141 		 * until much much later (after proc0 is created).
142 		 */
143 		kthread_create(isp_create_fc_worker, isp);
144 #ifdef	ISP_FW_CRASH_DUMP
145 		if (IS_2200(isp)) {
146 			FCPARAM(isp)->isp_dump_data =
147 			    malloc(QLA2200_RISC_IMAGE_DUMP_SIZE, M_DEVBUF,
148 				M_NOWAIT);
149 		} else if (IS_23XX(isp)) {
150 			FCPARAM(isp)->isp_dump_data =
151 			    malloc(QLA2300_RISC_IMAGE_DUMP_SIZE, M_DEVBUF,
152 				M_NOWAIT);
153 		}
154 		if (FCPARAM(isp)->isp_dump_data)
155 			FCPARAM(isp)->isp_dump_data[0] = 0;
156 #endif
157 	} else {
158 		int bus = 0;
159 		sdparam *sdp = isp->isp_param;
160 
161 		isp->isp_osinfo._chan.chan_ntargets = MAX_TARGETS;
162 		isp->isp_osinfo._chan.chan_id = sdp->isp_initiator_id;
163 		isp->isp_osinfo.discovered[0] = 1 << sdp->isp_initiator_id;
164 		if (IS_DUALBUS(isp)) {
165 			isp->isp_osinfo._chan_b = isp->isp_osinfo._chan;
166 			sdp++;
167 			isp->isp_osinfo.discovered[1] =
168 			    1 << sdp->isp_initiator_id;
169 			isp->isp_osinfo._chan_b.chan_id = sdp->isp_initiator_id;
170 			isp->isp_osinfo._chan_b.chan_channel = 1;
171 		}
172 		ISP_LOCK(isp);
173 		(void) isp_control(isp, ISPCTL_RESET_BUS, &bus);
174 		if (IS_DUALBUS(isp)) {
175 			bus++;
176 			(void) isp_control(isp, ISPCTL_RESET_BUS, &bus);
177 		}
178 		ISP_UNLOCK(isp);
179 	}
180 
181 
182 	/*
 183 	 * Defer enabling mailbox interrupts until later.
 184 	 */
 185 	config_interrupts((struct device *) isp, isp_config_interrupts);
186 
187 	/*
188 	 * And attach children (if any).
189 	 */
190 	config_found((void *)isp, &isp->isp_chanA, scsiprint);
191 	if (IS_DUALBUS(isp)) {
192 		config_found((void *)isp, &isp->isp_chanB, scsiprint);
193 	}
194 }
195 
196 
197 static void
198 isp_config_interrupts(struct device *self)
199 {
 200 	struct ispsoftc *isp = (struct ispsoftc *) self;
201 
202 	/*
203 	 * After this point, we'll be doing the new configuration
204 	 * schema which allows interrupts, so we can do tsleep/wakeup
205 	 * for mailbox stuff at that point, if that's allowed.
206 	 */
207 	if (IS_FC(isp)) {
208 		isp->isp_osinfo.no_mbox_ints = 0;
209 	}
210 }
211 
212 
213 /*
214  * minphys our xfers
215  */
216 
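/*
 * The 1 << 24 clamp below presumably reflects the 24-bit transfer length
 * the older 1020-class adapters can express; everything newer is merely
 * capped at 1 GB before deferring to the system minphys().
 */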
217 static void
218 ispminphys_1020(struct buf *bp)
219 {
220 	if (bp->b_bcount >= (1 << 24)) {
221 		bp->b_bcount = (1 << 24);
222 	}
223 	minphys(bp);
224 }
225 
226 static void
227 ispminphys(struct buf *bp)
228 {
229 	if (bp->b_bcount >= (1 << 30)) {
230 		bp->b_bcount = (1 << 30);
231 	}
232 	minphys(bp);
233 }
234 
235 static int
236 ispioctl(struct scsipi_channel *chan, u_long cmd, caddr_t addr, int flag,
237 	struct proc *p)
238 {
239 	struct ispsoftc *isp = (void *)chan->chan_adapter->adapt_dev;
240 	int retval = ENOTTY;
241 
242 	switch (cmd) {
243 #ifdef	ISP_FW_CRASH_DUMP
244 	case ISP_GET_FW_CRASH_DUMP:
245 	{
246 		u_int16_t *ptr = FCPARAM(isp)->isp_dump_data;
247 		size_t sz;
248 
249 		retval = 0;
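		/*
		 * The first word of the dump buffer doubles as a "dump
		 * present" flag: isp_attach clears it, a captured crash
		 * dump presumably sets it, and a successful copyout clears
		 * it again so the same dump is not handed back twice.
		 */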
250 		if (IS_2200(isp))
251 			sz = QLA2200_RISC_IMAGE_DUMP_SIZE;
252 		else
253 			sz = QLA2300_RISC_IMAGE_DUMP_SIZE;
254 		ISP_LOCK(isp);
255 		if (ptr && *ptr) {
256 			void *uaddr = *((void **) addr);
257 			if (copyout(ptr, uaddr, sz)) {
258 				retval = EFAULT;
259 			} else {
260 				*ptr = 0;
261 			}
262 		} else {
263 			retval = ENXIO;
264 		}
265 		ISP_UNLOCK(isp);
266 		break;
267 	}
268 
269 	case ISP_FORCE_CRASH_DUMP:
270 		ISP_LOCK(isp);
271 		if (isp->isp_osinfo.blocked == 0) {
 272 			isp->isp_osinfo.blocked = 1;
 273 			scsipi_channel_freeze(&isp->isp_chanA, 1);
 274 		}
275 		isp_fw_dump(isp);
276 		isp_reinit(isp);
277 		ISP_UNLOCK(isp);
278 		retval = 0;
279 		break;
280 #endif
281 	case ISP_SDBLEV:
282 	{
283 		int olddblev = isp->isp_dblev;
284 		isp->isp_dblev = *(int *)addr;
285 		*(int *)addr = olddblev;
286 		retval = 0;
287 		break;
288 	}
289 	case ISP_RESETHBA:
290 		ISP_LOCK(isp);
291 		isp_reinit(isp);
292 		ISP_UNLOCK(isp);
293 		retval = 0;
294 		break;
295 	case ISP_RESCAN:
296 		if (IS_FC(isp)) {
297 			ISP_LOCK(isp);
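			/*
			 * Give loop/fabric evaluation up to 5 seconds; the
			 * second argument to isp_fc_runstate appears to be
			 * a microsecond budget, as with the other callers
			 * in this file.
			 */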
298 			if (isp_fc_runstate(isp, 5 * 1000000)) {
299 				retval = EIO;
300 			} else {
301 				retval = 0;
302 			}
303 			ISP_UNLOCK(isp);
304 		}
305 		break;
306 	case ISP_FC_LIP:
307 		if (IS_FC(isp)) {
308 			ISP_LOCK(isp);
309 			if (isp_control(isp, ISPCTL_SEND_LIP, 0)) {
310 				retval = EIO;
311 			} else {
312 				retval = 0;
313 			}
314 			ISP_UNLOCK(isp);
315 		}
316 		break;
317 	case ISP_FC_GETDINFO:
318 	{
319 		struct isp_fc_device *ifc = (struct isp_fc_device *) addr;
320 		struct lportdb *lp;
321 
322 		if (ifc->loopid < 0 || ifc->loopid >= MAX_FC_TARG) {
323 			retval = EINVAL;
324 			break;
325 		}
326 		ISP_LOCK(isp);
327 		lp = &FCPARAM(isp)->portdb[ifc->loopid];
328 		if (lp->valid) {
329 			ifc->loopid = lp->loopid;
330 			ifc->portid = lp->portid;
331 			ifc->node_wwn = lp->node_wwn;
332 			ifc->port_wwn = lp->port_wwn;
333 			retval = 0;
334 		} else {
335 			retval = ENODEV;
336 		}
337 		ISP_UNLOCK(isp);
338 		break;
339 	}
340 	case ISP_GET_STATS:
341 	{
342 		isp_stats_t *sp = (isp_stats_t *) addr;
343 
344 		MEMZERO(sp, sizeof (*sp));
345 		sp->isp_stat_version = ISP_STATS_VERSION;
346 		sp->isp_type = isp->isp_type;
347 		sp->isp_revision = isp->isp_revision;
348 		ISP_LOCK(isp);
349 		sp->isp_stats[ISP_INTCNT] = isp->isp_intcnt;
350 		sp->isp_stats[ISP_INTBOGUS] = isp->isp_intbogus;
351 		sp->isp_stats[ISP_INTMBOXC] = isp->isp_intmboxc;
352 		sp->isp_stats[ISP_INGOASYNC] = isp->isp_intoasync;
353 		sp->isp_stats[ISP_RSLTCCMPLT] = isp->isp_rsltccmplt;
354 		sp->isp_stats[ISP_FPHCCMCPLT] = isp->isp_fphccmplt;
355 		sp->isp_stats[ISP_RSCCHIWAT] = isp->isp_rscchiwater;
356 		sp->isp_stats[ISP_FPCCHIWAT] = isp->isp_fpcchiwater;
357 		ISP_UNLOCK(isp);
358 		retval = 0;
359 		break;
360 	}
361 	case ISP_CLR_STATS:
362 		ISP_LOCK(isp);
363 		isp->isp_intcnt = 0;
364 		isp->isp_intbogus = 0;
365 		isp->isp_intmboxc = 0;
366 		isp->isp_intoasync = 0;
367 		isp->isp_rsltccmplt = 0;
368 		isp->isp_fphccmplt = 0;
369 		isp->isp_rscchiwater = 0;
370 		isp->isp_fpcchiwater = 0;
371 		ISP_UNLOCK(isp);
372 		retval = 0;
373 		break;
374 	case ISP_FC_GETHINFO:
375 	{
376 		struct isp_hba_device *hba = (struct isp_hba_device *) addr;
377 		MEMZERO(hba, sizeof (*hba));
378 		ISP_LOCK(isp);
379 		hba->fc_speed = FCPARAM(isp)->isp_gbspeed;
380 		hba->fc_scsi_supported = 1;
381 		hba->fc_topology = FCPARAM(isp)->isp_topo + 1;
382 		hba->fc_loopid = FCPARAM(isp)->isp_loopid;
383 		hba->nvram_node_wwn = FCPARAM(isp)->isp_nodewwn;
384 		hba->nvram_port_wwn = FCPARAM(isp)->isp_portwwn;
385 		hba->active_node_wwn = ISP_NODEWWN(isp);
386 		hba->active_port_wwn = ISP_PORTWWN(isp);
387 		ISP_UNLOCK(isp);
		retval = 0;
 388 		break;
389 	}
390 	case SCBUSIORESET:
391 		ISP_LOCK(isp);
392 		if (isp_control(isp, ISPCTL_RESET_BUS, &chan->chan_channel))
393 			retval = EIO;
394 		else
395 			retval = 0;
396 		ISP_UNLOCK(isp);
397 		break;
398 	default:
399 		break;
400 	}
401 	return (retval);
402 }
403 
404 static INLINE void
405 ispcmd(struct ispsoftc *isp, XS_T *xs)
406 {
407 	ISP_LOCK(isp);
408 	if (isp->isp_state < ISP_RUNSTATE) {
409 		DISABLE_INTS(isp);
410 		isp_init(isp);
411 		if (isp->isp_state != ISP_INITSTATE) {
412 			ENABLE_INTS(isp);
413 			ISP_UNLOCK(isp);
414 			isp_prt(isp, ISP_LOGERR, "isp not at init state");
415 			XS_SETERR(xs, HBA_BOTCH);
416 			scsipi_done(xs);
417 			return;
418 		}
419 		isp->isp_state = ISP_RUNSTATE;
420 		ENABLE_INTS(isp);
421 	}
422 	/*
423 	 * Handle the case of a FC card where the FC thread hasn't
424 	 * fired up yet and we have loop state to clean up. If we
425 	 * can't clear things up and we've never seen loop up, bounce
426 	 * the command.
427 	 */
428 	if (IS_FC(isp) && isp->isp_osinfo.threadwork &&
429 	    isp->isp_osinfo.thread == 0) {
430 		volatile u_int8_t ombi = isp->isp_osinfo.no_mbox_ints;
431 		int delay_time;
432 
433 		if (xs->xs_control & XS_CTL_POLL) {
434 			isp->isp_osinfo.no_mbox_ints = 1;
435 		}
436 
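		/*
		 * The first time through we give the loop a generous 10
		 * second budget to come up; on later attempts we only wait
		 * 250 ms before deciding how to dispose of the command.
		 */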
437 		if (isp->isp_osinfo.loop_checked == 0) {
438 			delay_time = 10 * 1000000;
439 			isp->isp_osinfo.loop_checked = 1;
440 		} else {
441 			delay_time = 250000;
442 		}
443 
444 		if (isp_fc_runstate(isp, delay_time) != 0) {
445 			if (xs->xs_control & XS_CTL_POLL) {
446 				isp->isp_osinfo.no_mbox_ints = ombi;
447 			}
448 			if (FCPARAM(isp)->loop_seen_once == 0) {
449 				XS_SETERR(xs, HBA_SELTIMEOUT);
450 				scsipi_done(xs);
451 				ISP_UNLOCK(isp);
452 				return;
453 			}
454 			/*
455 			 * Otherwise, fall thru to be queued up for later.
456 			 */
457 		} else {
458 			int wasblocked =
459 			    (isp->isp_osinfo.blocked || isp->isp_osinfo.paused);
460 			isp->isp_osinfo.threadwork = 0;
461 			isp->isp_osinfo.blocked =
462 			    isp->isp_osinfo.paused = 0;
463 			if (wasblocked) {
464 				scsipi_channel_thaw(&isp->isp_chanA, 1);
465 			}
466 		}
467 		if (xs->xs_control & XS_CTL_POLL) {
468 			isp->isp_osinfo.no_mbox_ints = ombi;
469 		}
470 	}
471 
472 	if (isp->isp_osinfo.paused) {
473 		isp_prt(isp, ISP_LOGWARN, "I/O while paused");
474 		xs->error = XS_RESOURCE_SHORTAGE;
475 		scsipi_done(xs);
476 		ISP_UNLOCK(isp);
477 		return;
478 	}
479 	if (isp->isp_osinfo.blocked) {
480 		isp_prt(isp, ISP_LOGWARN, "I/O while blocked");
481 		xs->error = XS_REQUEUE;
482 		scsipi_done(xs);
483 		ISP_UNLOCK(isp);
484 		return;
485 	}
486 
487 	if (xs->xs_control & XS_CTL_POLL) {
488 		volatile u_int8_t ombi = isp->isp_osinfo.no_mbox_ints;
489 		isp->isp_osinfo.no_mbox_ints = 1;
490 		isp_polled_cmd(isp, xs);
491 		isp->isp_osinfo.no_mbox_ints = ombi;
492 		ISP_UNLOCK(isp);
493 		return;
494 	}
495 
496 	switch (isp_start(xs)) {
497 	case CMD_QUEUED:
498 		if (xs->timeout) {
499 			callout_reset(&xs->xs_callout, _XT(xs), isp_dog, xs);
500 		}
501 		break;
502 	case CMD_EAGAIN:
503 		isp->isp_osinfo.paused = 1;
504 		xs->error = XS_RESOURCE_SHORTAGE;
505 		scsipi_channel_freeze(&isp->isp_chanA, 1);
506 		if (IS_DUALBUS(isp)) {
507 			scsipi_channel_freeze(&isp->isp_chanB, 1);
508 		}
509 		scsipi_done(xs);
510 		break;
511 	case CMD_RQLATER:
512 		/*
513 		 * We can only get RQLATER from FC devices (1 channel only)
514 		 *
515 		 * Also, if we've never seen loop up, bounce the command
516 		 * (somebody has booted with no FC cable connected)
517 		 */
518 		if (FCPARAM(isp)->loop_seen_once == 0) {
519 			XS_SETERR(xs, HBA_SELTIMEOUT);
520 			scsipi_done(xs);
521 			break;
522 		}
523 		if (isp->isp_osinfo.blocked == 0) {
524 			isp->isp_osinfo.blocked = 1;
525 			scsipi_channel_freeze(&isp->isp_chanA, 1);
526 		}
527 		xs->error = XS_REQUEUE;
528 		scsipi_done(xs);
529 		break;
530 	case CMD_COMPLETE:
531 		scsipi_done(xs);
532 		break;
533 	}
534 	ISP_UNLOCK(isp);
535 }
536 
537 static void
538 isprequest(struct scsipi_channel *chan, scsipi_adapter_req_t req, void *arg)
539 {
540 	struct ispsoftc *isp = (void *)chan->chan_adapter->adapt_dev;
541 
542 	switch (req) {
543 	case ADAPTER_REQ_RUN_XFER:
544 		ispcmd(isp, (XS_T *) arg);
545 		break;
546 
547 	case ADAPTER_REQ_GROW_RESOURCES:
548 		/* Not supported. */
549 		break;
550 
551 	case ADAPTER_REQ_SET_XFER_MODE:
552 	if (IS_SCSI(isp)) {
553 		struct scsipi_xfer_mode *xm = arg;
554 		int dflags = 0;
555 		sdparam *sdp = SDPARAM(isp);
556 
557 		sdp += chan->chan_channel;
558 		if (xm->xm_mode & PERIPH_CAP_TQING)
559 			dflags |= DPARM_TQING;
560 		if (xm->xm_mode & PERIPH_CAP_WIDE16)
561 			dflags |= DPARM_WIDE;
562 		if (xm->xm_mode & PERIPH_CAP_SYNC)
563 			dflags |= DPARM_SYNC;
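		/*
		 * Merge the requested capabilities into goal_flags and
		 * mark both the target and the bus as needing an update;
		 * the core driver renegotiates on its next update pass and
		 * presumably reports the result back through
		 * ISPASYNC_NEW_TGT_PARAMS.
		 */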
564 		ISP_LOCK(isp);
565 		sdp->isp_devparam[xm->xm_target].goal_flags |= dflags;
566 		dflags = sdp->isp_devparam[xm->xm_target].goal_flags;
567 		sdp->isp_devparam[xm->xm_target].dev_update = 1;
568 		isp->isp_update |= (1 << chan->chan_channel);
569 		ISP_UNLOCK(isp);
570 		isp_prt(isp, ISP_LOGDEBUG1,
571 		    "ispioctl: device flags 0x%x for %d.%d.X",
572 		    dflags, chan->chan_channel, xm->xm_target);
573 		break;
574 	}
575 	default:
576 		break;
577 	}
578 }
579 
580 static void
581 isp_polled_cmd(struct ispsoftc *isp, XS_T *xs)
582 {
583 	int result;
584 	int infinite = 0, mswait;
585 
586 	result = isp_start(xs);
587 
588 	switch (result) {
589 	case CMD_QUEUED:
590 		break;
591 	case CMD_RQLATER:
592 		if (XS_NOERR(xs)) {
593 			xs->error = XS_REQUEUE;
594 		}
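		/* FALLTHROUGH */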
595 	case CMD_EAGAIN:
596 		if (XS_NOERR(xs)) {
597 			xs->error = XS_RESOURCE_SHORTAGE;
598 		}
599 		/* FALLTHROUGH */
600 	case CMD_COMPLETE:
601 		scsipi_done(xs);
602 		return;
603 
604 	}
605 
606 	/*
607 	 * If we can't use interrupts, poll on completion.
608 	 */
609 	if ((mswait = XS_TIME(xs)) == 0)
610 		infinite = 1;
611 
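	/*
	 * mswait is in milliseconds; each pass below burns roughly one
	 * millisecond (USEC_DELAY(1000)) between checks of the interrupt
	 * status, so the loop approximates the command timeout.
	 */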
612 	while (mswait || infinite) {
613 		u_int16_t isr, sema, mbox;
614 		if (ISP_READ_ISR(isp, &isr, &sema, &mbox)) {
615 			isp_intr(isp, isr, sema, mbox);
616 			if (XS_CMD_DONE_P(xs)) {
617 				break;
618 			}
619 		}
620 		USEC_DELAY(1000);
621 		mswait -= 1;
622 	}
623 
624 	/*
625 	 * If no other error occurred but we didn't finish,
626 	 * something bad happened.
627 	 */
628 	if (XS_CMD_DONE_P(xs) == 0) {
629 		if (isp_control(isp, ISPCTL_ABORT_CMD, xs)) {
630 			isp_reinit(isp);
631 		}
632 		if (XS_NOERR(xs)) {
633 			isp_prt(isp, ISP_LOGERR, "polled command timed out");
634 			XS_SETERR(xs, HBA_BOTCH);
635 		}
636 	}
637 	scsipi_done(xs);
638 }
639 
640 void
641 isp_done(XS_T *xs)
642 {
643 	XS_CMD_S_DONE(xs);
644 	if (XS_CMD_WDOG_P(xs) == 0) {
645 		struct ispsoftc *isp = XS_ISP(xs);
646 		callout_stop(&xs->xs_callout);
647 		if (XS_CMD_GRACE_P(xs)) {
648 			isp_prt(isp, ISP_LOGDEBUG1,
649 			    "finished command on borrowed time");
650 		}
651 		XS_CMD_S_CLEAR(xs);
652 		/*
653 		 * Fixup- if we get a QFULL, we need
654 		 * to set XS_BUSY as the error.
655 		 */
656 		if (xs->status == SCSI_QUEUE_FULL) {
657 			xs->error = XS_BUSY;
658 		}
659 		if (isp->isp_osinfo.paused) {
660 			isp->isp_osinfo.paused = 0;
661 			scsipi_channel_timed_thaw(&isp->isp_chanA);
662 			if (IS_DUALBUS(isp)) {
663 				scsipi_channel_timed_thaw(&isp->isp_chanB);
664 			}
665 		}
 666 		if (xs->error == XS_DRIVER_STUFFUP) {
 667 			isp_prt(isp, ISP_LOGERR,
			    "BOTCHED cmd for %d.%d.%d cmd 0x%x datalen %ld",
 668 			    XS_CHANNEL(xs), XS_TGT(xs), XS_LUN(xs),
			    XS_CDBP(xs)[0], (long) XS_XFRLEN(xs));
 669 		}
670 		scsipi_done(xs);
671 	}
672 }
673 
674 static void
675 isp_dog(void *arg)
676 {
677 	XS_T *xs = arg;
678 	struct ispsoftc *isp = XS_ISP(xs);
679 	u_int16_t handle;
680 
681 	ISP_ILOCK(isp);
682 	/*
683 	 * We've decided this command is dead. Make sure we're not trying
 683 	 * to kill a command that's already dead by getting its handle
 684 	 * and seeing whether it's still alive.
686 	 */
687 	handle = isp_find_handle(isp, xs);
688 	if (handle) {
689 		u_int16_t isr, mbox, sema;
690 
691 		if (XS_CMD_DONE_P(xs)) {
692 			isp_prt(isp, ISP_LOGDEBUG1,
693 			    "watchdog found done cmd (handle 0x%x)", handle);
694 			ISP_IUNLOCK(isp);
695 			return;
696 		}
697 
698 		if (XS_CMD_WDOG_P(xs)) {
699 			isp_prt(isp, ISP_LOGDEBUG1,
700 			    "recursive watchdog (handle 0x%x)", handle);
701 			ISP_IUNLOCK(isp);
702 			return;
703 		}
704 
705 		XS_CMD_S_WDOG(xs);
706 
707 		if (ISP_READ_ISR(isp, &isr, &sema, &mbox)) {
708 			isp_intr(isp, isr, sema, mbox);
709 
710 		}
711 		if (XS_CMD_DONE_P(xs)) {
712 			isp_prt(isp, ISP_LOGDEBUG1,
713 			    "watchdog cleanup for handle 0x%x", handle);
714 			XS_CMD_C_WDOG(xs);
715 			isp_done(xs);
716 		} else if (XS_CMD_GRACE_P(xs)) {
717 			isp_prt(isp, ISP_LOGDEBUG1,
718 			    "watchdog timeout for handle 0x%x", handle);
719 			/*
720 			 * Make sure the command is *really* dead before we
721 			 * release the handle (and DMA resources) for reuse.
722 			 */
723 			(void) isp_control(isp, ISPCTL_ABORT_CMD, arg);
724 
725 			/*
726 			 * After this point, the command is really dead.
727 			 */
728 			if (XS_XFRLEN(xs)) {
729 				ISP_DMAFREE(isp, xs, handle);
730 			}
731 			isp_destroy_handle(isp, handle);
732 			XS_SETERR(xs, XS_TIMEOUT);
733 			XS_CMD_S_CLEAR(xs);
734 			isp_done(xs);
735 		} else {
736 			u_int16_t nxti, optr;
737 			ispreq_t local, *mp = &local, *qe;
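			/*
			 * First expiration: grant the command a grace
			 * period. Push a SYNC_ALL marker through the
			 * request queue to flush any completions the
			 * firmware may be sitting on, re-arm the watchdog
			 * for one more second, and only abort the command
			 * (the XS_CMD_GRACE_P branch above) if it is still
			 * outstanding on the next expiration.
			 */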
738 			isp_prt(isp, ISP_LOGDEBUG2,
739 			    "possible command timeout on handle %x", handle);
740 			XS_CMD_C_WDOG(xs);
741 			callout_reset(&xs->xs_callout, hz, isp_dog, xs);
742 			if (isp_getrqentry(isp, &nxti, &optr, (void *) &qe)) {
 743 				ISP_IUNLOCK(isp);
744 				return;
745 			}
746 			XS_CMD_S_GRACE(xs);
747 			MEMZERO((void *) mp, sizeof (*mp));
748 			mp->req_header.rqs_entry_count = 1;
749 			mp->req_header.rqs_entry_type = RQSTYPE_MARKER;
750 			mp->req_modifier = SYNC_ALL;
751 			mp->req_target = XS_CHANNEL(xs) << 7;
752 			isp_put_request(isp, mp, qe);
753 			ISP_ADD_REQUEST(isp, nxti);
754 		}
755 	} else {
756 		isp_prt(isp, ISP_LOGDEBUG0, "watchdog with no command");
757 	}
758 	ISP_IUNLOCK(isp);
759 }
760 
761 /*
762  * Fibre Channel state cleanup thread
763  */
764 static void
765 isp_create_fc_worker(void *arg)
766 {
767 	struct ispsoftc *isp = arg;
768 
769 	if (kthread_create1(isp_fc_worker, isp, &isp->isp_osinfo.thread,
770 	    "%s:fc_thrd", isp->isp_name)) {
771 		isp_prt(isp, ISP_LOGERR, "unable to create FC worker thread");
772 		panic("isp_create_fc_worker");
773 	}
774 
775 }
776 
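/*
 * Per-instance kernel thread for fibre channel cards. It sleeps until
 * isp_async flags work (threadwork) and wakes it, then repeatedly runs
 * isp_fc_runstate until the loop and firmware are ready (or until it is
 * clear the loop has never been seen), thaws the midlayer queue that was
 * frozen while the loop was in flux, and goes back to sleep.
 */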
777 static void
778 isp_fc_worker(void *arg)
779 {
780 	void scsipi_run_queue(struct scsipi_channel *);
781 	struct ispsoftc *isp = arg;
782 
783 	for (;;) {
784 		int s;
785 
786 		/*
787 		 * Note we do *not* use the ISP_LOCK/ISP_UNLOCK macros here.
788 		 */
789 		s = splbio();
790 		while (isp->isp_osinfo.threadwork) {
791 			isp->isp_osinfo.threadwork = 0;
792 			if (isp_fc_runstate(isp, 250000) == 0) {
793 				break;
794 			}
 795 			if (isp->isp_osinfo.loop_checked &&
796 			     FCPARAM(isp)->loop_seen_once == 0) {
797 				splx(s);
798 				goto skip;
799 			}
800 			isp->isp_osinfo.loop_checked = 1;
801 			isp->isp_osinfo.threadwork = 1;
802 			splx(s);
803 			delay(500 * 1000);
804 			s = splbio();
805 		}
806 		if (FCPARAM(isp)->isp_fwstate != FW_READY ||
807 		    FCPARAM(isp)->isp_loopstate != LOOP_READY) {
808 			isp_prt(isp, ISP_LOGINFO, "isp_fc_runstate in vain");
809 			isp->isp_osinfo.threadwork = 1;
810 			splx(s);
811 			continue;
812 		}
813 
814 		if (isp->isp_osinfo.blocked) {
815 			isp->isp_osinfo.blocked = 0;
816 			isp_prt(isp, ISP_LOGDEBUG0,
817 			    "restarting queues (freeze count %d)",
818 			    isp->isp_chanA.chan_qfreeze);
819 			scsipi_channel_thaw(&isp->isp_chanA, 1);
820 		}
821 
822 		if (isp->isp_osinfo.thread == NULL)
823 			break;
824 
825 skip:
826 		(void) tsleep(&isp->isp_osinfo.thread, PRIBIO, "fcclnup", 0);
827 
828 		splx(s);
829 	}
830 
831 	/* In case parent is waiting for us to exit. */
832 	wakeup(&isp->isp_osinfo.thread);
833 
834 	kthread_exit(0);
835 }
836 
837 /*
838  * Free any associated resources prior to decommissioning and
839  * set the card to a known state (so it doesn't wake up and kick
840  * us when we aren't expecting it to).
841  *
842  * Locks are held before coming here.
843  */
844 void
845 isp_uninit(struct ispsoftc *isp)
846 {
847 	isp_lock(isp);
848 	/*
849 	 * Leave with interrupts disabled.
850 	 */
851 	DISABLE_INTS(isp);
852 	isp_unlock(isp);
853 }
854 
855 int
856 isp_async(struct ispsoftc *isp, ispasync_t cmd, void *arg)
857 {
858 	int bus, tgt;
859 
860 	switch (cmd) {
861 	case ISPASYNC_NEW_TGT_PARAMS:
862 	if (IS_SCSI(isp) && isp->isp_dblev) {
863 		sdparam *sdp = isp->isp_param;
864 		int flags;
865 		struct scsipi_xfer_mode xm;
866 
867 		tgt = *((int *) arg);
868 		bus = (tgt >> 16) & 0xffff;
869 		tgt &= 0xffff;
870 		sdp += bus;
871 		flags = sdp->isp_devparam[tgt].actv_flags;
872 
873 		xm.xm_mode = 0;
874 		xm.xm_period = sdp->isp_devparam[tgt].actv_period;
875 		xm.xm_offset = sdp->isp_devparam[tgt].actv_offset;
876 		xm.xm_target = tgt;
877 
878 		if ((flags & DPARM_SYNC) && xm.xm_period && xm.xm_offset)
879 			xm.xm_mode |= PERIPH_CAP_SYNC;
880 		if (flags & DPARM_WIDE)
881 			xm.xm_mode |= PERIPH_CAP_WIDE16;
882 		if (flags & DPARM_TQING)
883 			xm.xm_mode |= PERIPH_CAP_TQING;
884 		scsipi_async_event(bus? &isp->isp_chanB : &isp->isp_chanA,
885 		    ASYNC_EVENT_XFER_MODE, &xm);
886 		break;
887 	}
888 	case ISPASYNC_BUS_RESET:
889 		bus = *((int *) arg);
890 		scsipi_async_event(bus? &isp->isp_chanB : &isp->isp_chanA,
891 		    ASYNC_EVENT_RESET, NULL);
892 		isp_prt(isp, ISP_LOGINFO, "SCSI bus %d reset detected", bus);
893 		break;
894 	case ISPASYNC_LIP:
895 		/*
896 		 * Don't do queue freezes or blockage until we have the
897 		 * thread running that can unfreeze/unblock us.
898 		 */
899 		if (isp->isp_osinfo.blocked == 0)  {
900 			if (isp->isp_osinfo.thread) {
901 				isp->isp_osinfo.blocked = 1;
902 				scsipi_channel_freeze(&isp->isp_chanA, 1);
903 			}
904 		}
905 		isp_prt(isp, ISP_LOGINFO, "LIP Received");
906 		break;
907 	case ISPASYNC_LOOP_RESET:
908 		/*
909 		 * Don't do queue freezes or blockage until we have the
910 		 * thread running that can unfreeze/unblock us.
911 		 */
912 		if (isp->isp_osinfo.blocked == 0) {
913 			if (isp->isp_osinfo.thread) {
914 				isp->isp_osinfo.blocked = 1;
915 				scsipi_channel_freeze(&isp->isp_chanA, 1);
916 			}
917 		}
918 		isp_prt(isp, ISP_LOGINFO, "Loop Reset Received");
919 		break;
920 	case ISPASYNC_LOOP_DOWN:
921 		/*
922 		 * Don't do queue freezes or blockage until we have the
923 		 * thread running that can unfreeze/unblock us.
924 		 */
925 		if (isp->isp_osinfo.blocked == 0) {
926 			if (isp->isp_osinfo.thread) {
927 				isp->isp_osinfo.blocked = 1;
928 				scsipi_channel_freeze(&isp->isp_chanA, 1);
929 			}
930 		}
931 		isp_prt(isp, ISP_LOGINFO, "Loop DOWN");
932 		break;
 933 	case ISPASYNC_LOOP_UP:
934 		/*
935 		 * Let the subsequent ISPASYNC_CHANGE_NOTIFY invoke
936 		 * the FC worker thread. When the FC worker thread
937 		 * is done, let *it* call scsipi_channel_thaw...
938 		 */
939 		isp_prt(isp, ISP_LOGINFO, "Loop UP");
940 		break;
941 	case ISPASYNC_PROMENADE:
942 	if (IS_FC(isp) && isp->isp_dblev) {
943 		static const char fmt[] = "Target %d (Loop 0x%x) Port ID 0x%x "
944 		    "(role %s) %s\n Port WWN 0x%08x%08x\n Node WWN 0x%08x%08x";
 945 		static const char *const roles[4] = {
946 		    "None", "Target", "Initiator", "Target/Initiator"
947 		};
948 		fcparam *fcp = isp->isp_param;
949 		int tgt = *((int *) arg);
950 		struct lportdb *lp = &fcp->portdb[tgt];
951 
952 		isp_prt(isp, ISP_LOGINFO, fmt, tgt, lp->loopid, lp->portid,
953 		    roles[lp->roles & 0x3],
954 		    (lp->valid)? "Arrived" : "Departed",
955 		    (u_int32_t) (lp->port_wwn >> 32),
956 		    (u_int32_t) (lp->port_wwn & 0xffffffffLL),
957 		    (u_int32_t) (lp->node_wwn >> 32),
958 		    (u_int32_t) (lp->node_wwn & 0xffffffffLL));
959 		break;
960 	}
961 	case ISPASYNC_CHANGE_NOTIFY:
962 		if (arg == ISPASYNC_CHANGE_PDB) {
963 			isp_prt(isp, ISP_LOGINFO, "Port Database Changed");
964 		} else if (arg == ISPASYNC_CHANGE_SNS) {
965 			isp_prt(isp, ISP_LOGINFO,
966 			    "Name Server Database Changed");
967 		}
968 
969 		/*
970 		 * We can set blocked here because we know it's now okay
971 		 * to try and run isp_fc_runstate (in order to build loop
972 		 * state). But we don't try and freeze the midlayer's queue
973 		 * if we have no thread that we can wake to later unfreeze
974 		 * it.
975 		 */
976 		if (isp->isp_osinfo.blocked == 0) {
977 			isp->isp_osinfo.blocked = 1;
978 			if (isp->isp_osinfo.thread) {
979 				scsipi_channel_freeze(&isp->isp_chanA, 1);
980 			}
981 		}
982 		/*
983 		 * Note that we have work for the thread to do, and
984 		 * if the thread is here already, wake it up.
985 		 */
986 		isp->isp_osinfo.threadwork++;
987 		if (isp->isp_osinfo.thread) {
988 			wakeup(&isp->isp_osinfo.thread);
989 		} else {
990 			isp_prt(isp, ISP_LOGDEBUG1, "no FC thread yet");
991 		}
992 		break;
993 	case ISPASYNC_FABRIC_DEV:
994 	{
995 		int target, base, lim;
996 		fcparam *fcp = isp->isp_param;
997 		struct lportdb *lp = NULL;
998 		struct lportdb *clp = (struct lportdb *) arg;
999 		char *pt;
1000 
1001 		switch (clp->port_type) {
1002 		case 1:
1003 			pt = "   N_Port";
1004 			break;
1005 		case 2:
1006 			pt = "  NL_Port";
1007 			break;
1008 		case 3:
1009 			pt = "F/NL_Port";
1010 			break;
1011 		case 0x7f:
1012 			pt = "  Nx_Port";
1013 			break;
1014 		case 0x81:
1015 			pt = "  F_port";
1016 			break;
1017 		case 0x82:
1018 			pt = "  FL_Port";
1019 			break;
1020 		case 0x84:
1021 			pt = "   E_port";
1022 			break;
1023 		default:
1024 			pt = " ";
1025 			break;
1026 		}
1027 
1028 		isp_prt(isp, ISP_LOGINFO,
1029 		    "%s Fabric Device @ PortID 0x%x", pt, clp->portid);
1030 
1031 		/*
1032 		 * If we don't have an initiator role we bail.
1033 		 *
1034 		 * We just use ISPASYNC_FABRIC_DEV for announcement purposes.
1035 		 */
1036 
1037 		if ((isp->isp_role & ISP_ROLE_INITIATOR) == 0) {
1038 			break;
1039 		}
1040 
1041 		/*
1042 		 * Is this entry for us? If so, we bail.
1043 		 */
1044 
1045 		if (fcp->isp_portid == clp->portid) {
1046 			break;
1047 		}
1048 
1049 		/*
1050 		 * Else, the default policy is to find room for it in
1051 		 * our local port database. Later, when we execute
1052 		 * the call to isp_pdb_sync either this newly arrived
1053 		 * or already logged in device will be (re)announced.
1054 		 */
1055 
1056 		if (fcp->isp_topo == TOPO_FL_PORT)
1057 			base = FC_SNS_ID+1;
1058 		else
1059 			base = 0;
1060 
1061 		if (fcp->isp_topo == TOPO_N_PORT)
1062 			lim = 1;
1063 		else
1064 			lim = MAX_FC_TARG;
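		/*
		 * base/lim bracket the usable target slots: on a public
		 * loop (FL_Port) start above the reserved SNS id, and on a
		 * point-to-point N_Port topology there is only a single
		 * slot to consider. The reserved FL_PORT_ID..FC_SNS_ID
		 * range is skipped again inside the loops below.
		 */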
1065 
1066 		/*
1067 		 * Is it already in our list?
1068 		 */
1069 		for (target = base; target < lim; target++) {
1070 			if (target >= FL_PORT_ID && target <= FC_SNS_ID) {
1071 				continue;
1072 			}
1073 			lp = &fcp->portdb[target];
1074 			if (lp->port_wwn == clp->port_wwn &&
1075 			    lp->node_wwn == clp->node_wwn) {
1076 				lp->fabric_dev = 1;
1077 				break;
1078 			}
1079 		}
1080 		if (target < lim) {
1081 			break;
1082 		}
1083 		for (target = base; target < lim; target++) {
1084 			if (target >= FL_PORT_ID && target <= FC_SNS_ID) {
1085 				continue;
1086 			}
1087 			lp = &fcp->portdb[target];
1088 			if (lp->port_wwn == 0) {
1089 				break;
1090 			}
1091 		}
1092 		if (target == lim) {
1093 			isp_prt(isp, ISP_LOGWARN,
1094 			    "out of space for fabric devices");
1095 			break;
1096 		}
1097 		lp->port_type = clp->port_type;
1098 		lp->fc4_type = clp->fc4_type;
1099 		lp->node_wwn = clp->node_wwn;
1100 		lp->port_wwn = clp->port_wwn;
1101 		lp->portid = clp->portid;
1102 		lp->fabric_dev = 1;
1103 		break;
1104 	}
1105 	case ISPASYNC_FW_CRASH:
1106 	{
1107 		u_int16_t mbox1, mbox6;
1108 		mbox1 = ISP_READ(isp, OUTMAILBOX1);
1109 		if (IS_DUALBUS(isp)) {
1110 			mbox6 = ISP_READ(isp, OUTMAILBOX6);
1111 		} else {
1112 			mbox6 = 0;
1113 		}
 1114 		isp_prt(isp, ISP_LOGERR,
 1115 		    "Internal Firmware Error on bus %d @ RISC Address 0x%x",
 1116 		    mbox6, mbox1);
1117 #ifdef	ISP_FW_CRASH_DUMP
1118 		if (IS_FC(isp)) {
1119 			if (isp->isp_osinfo.blocked == 0) {
1120 				isp->isp_osinfo.blocked = 1;
1121 				scsipi_channel_freeze(&isp->isp_chanA, 1);
1122 			}
1123 			isp_fw_dump(isp);
1124 		}
1125 		isp_reinit(isp);
1126 		isp_async(isp, ISPASYNC_FW_RESTART, NULL);
1127 #endif
1128 		break;
1129 	}
1130 	default:
1131 		break;
1132 	}
1133 	return (0);
1134 }
1135 
1136 #include <machine/stdarg.h>
1137 void
1138 isp_prt(struct ispsoftc *isp, int level, const char *fmt, ...)
1139 {
1140 	va_list ap;
1141 	if (level != ISP_LOGALL && (level & isp->isp_dblev) == 0) {
1142 		return;
1143 	}
1144 	printf("%s: ", isp->isp_name);
1145 	va_start(ap, fmt);
1146 	vprintf(fmt, ap);
1147 	va_end(ap);
1148 	printf("\n");
1149 }
1150