xref: /netbsd-src/sys/dev/scsipi/scsipi_base.c (revision 62a8debe1dc62962e18a1c918def78666141273b)
1 /*	$NetBSD: scsipi_base.c,v 1.151 2010/02/12 11:39:33 pooka Exp $	*/
2 
3 /*-
4  * Copyright (c) 1998, 1999, 2000, 2002, 2003, 2004 The NetBSD Foundation, Inc.
5  * All rights reserved.
6  *
7  * This code is derived from software contributed to The NetBSD Foundation
8  * by Charles M. Hannum; by Jason R. Thorpe of the Numerical Aerospace
9  * Simulation Facility, NASA Ames Research Center.
10  *
11  * Redistribution and use in source and binary forms, with or without
12  * modification, are permitted provided that the following conditions
13  * are met:
14  * 1. Redistributions of source code must retain the above copyright
15  *    notice, this list of conditions and the following disclaimer.
16  * 2. Redistributions in binary form must reproduce the above copyright
17  *    notice, this list of conditions and the following disclaimer in the
18  *    documentation and/or other materials provided with the distribution.
19  *
20  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
21  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
22  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
23  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
24  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
30  * POSSIBILITY OF SUCH DAMAGE.
31  */
32 
33 #include <sys/cdefs.h>
34 __KERNEL_RCSID(0, "$NetBSD: scsipi_base.c,v 1.151 2010/02/12 11:39:33 pooka Exp $");
35 
36 #include "opt_scsi.h"
37 
38 #include <sys/param.h>
39 #include <sys/systm.h>
40 #include <sys/kernel.h>
41 #include <sys/buf.h>
42 #include <sys/uio.h>
43 #include <sys/malloc.h>
44 #include <sys/pool.h>
45 #include <sys/errno.h>
46 #include <sys/device.h>
47 #include <sys/proc.h>
48 #include <sys/kthread.h>
49 #include <sys/hash.h>
50 
51 #include <uvm/uvm_extern.h>
52 
53 #include <dev/scsipi/scsi_spc.h>
54 #include <dev/scsipi/scsipi_all.h>
55 #include <dev/scsipi/scsipi_disk.h>
56 #include <dev/scsipi/scsipiconf.h>
57 #include <dev/scsipi/scsipi_base.h>
58 
59 #include <dev/scsipi/scsi_all.h>
60 #include <dev/scsipi/scsi_message.h>
61 
62 #include <machine/param.h>
63 
64 static int	scsipi_complete(struct scsipi_xfer *);
65 static void	scsipi_request_sense(struct scsipi_xfer *);
66 static int	scsipi_enqueue(struct scsipi_xfer *);
67 static void	scsipi_run_queue(struct scsipi_channel *chan);
68 
69 static void	scsipi_completion_thread(void *);
70 
71 static void	scsipi_get_tag(struct scsipi_xfer *);
72 static void	scsipi_put_tag(struct scsipi_xfer *);
73 
74 static int	scsipi_get_resource(struct scsipi_channel *);
75 static void	scsipi_put_resource(struct scsipi_channel *);
76 
77 static void	scsipi_async_event_max_openings(struct scsipi_channel *,
78 		    struct scsipi_max_openings *);
79 static void	scsipi_async_event_xfer_mode(struct scsipi_channel *,
80 		    struct scsipi_xfer_mode *);
81 static void	scsipi_async_event_channel_reset(struct scsipi_channel *);
82 
83 static struct pool scsipi_xfer_pool;
84 
85 /*
86  * scsipi_init:
87  *
88  *	Called when a scsibus or atapibus is attached to the system
89  *	to initialize shared data structures.
90  */
91 void
92 scsipi_init(void)
93 {
94 	static int scsipi_init_done;
95 
96 	if (scsipi_init_done)
97 		return;
98 	scsipi_init_done = 1;
99 
100 	/* Initialize the scsipi_xfer pool. */
101 	pool_init(&scsipi_xfer_pool, sizeof(struct scsipi_xfer), 0,
102 	    0, 0, "scxspl", NULL, IPL_BIO);
103 	if (pool_prime(&scsipi_xfer_pool,
104 	    PAGE_SIZE / sizeof(struct scsipi_xfer)) == ENOMEM) {
105 		printf("WARNING: not enough memory for scsipi_xfer_pool\n");
106 	}
107 }
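
/*
 * Illustrative arithmetic (not from the source): with PAGE_SIZE = 4096
 * and a struct scsipi_xfer of roughly 300 bytes, the pool_prime() call
 * above pre-allocates about 13 transfer descriptors, one page's worth,
 * so early transfers need not fall back on the page allocator.
 */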
108 
109 /*
110  * scsipi_channel_init:
111  *
112  *	Initialize a scsipi_channel when it is attached.
113  */
114 int
115 scsipi_channel_init(struct scsipi_channel *chan)
116 {
117 	struct scsipi_adapter *adapt = chan->chan_adapter;
118 	int i;
119 
120 	/* Initialize shared data. */
121 	scsipi_init();
122 
123 	/* Initialize the queues. */
124 	TAILQ_INIT(&chan->chan_queue);
125 	TAILQ_INIT(&chan->chan_complete);
126 
127 	for (i = 0; i < SCSIPI_CHAN_PERIPH_BUCKETS; i++)
128 		LIST_INIT(&chan->chan_periphtab[i]);
129 
130 	/*
131 	 * Create the asynchronous completion thread.
132 	 */
133 	if (kthread_create(PRI_NONE, 0, NULL, scsipi_completion_thread, chan,
134 	    &chan->chan_thread, "%s", chan->chan_name)) {
135 		aprint_error_dev(adapt->adapt_dev, "unable to create completion thread for "
136 		    "channel %d\n", chan->chan_channel);
137 		panic("scsipi_channel_init");
138 	}
139 
140 	return (0);
141 }
142 
143 /*
144  * scsipi_channel_shutdown:
145  *
146  *	Shutdown a scsipi_channel.
147  */
148 void
149 scsipi_channel_shutdown(struct scsipi_channel *chan)
150 {
151 
152 	/*
153 	 * Shut down the completion thread.
154 	 */
155 	chan->chan_tflags |= SCSIPI_CHANT_SHUTDOWN;
156 	wakeup(&chan->chan_complete);
157 
158 	/*
159 	 * Now wait for the thread to exit.
160 	 */
161 	while (chan->chan_thread != NULL)
162 		(void) tsleep(&chan->chan_thread, PRIBIO, "scshut", 0);
163 }
164 
165 static uint32_t
166 scsipi_chan_periph_hash(uint64_t t, uint64_t l)
167 {
168 	uint32_t hash;
169 
170 	hash = hash32_buf(&t, sizeof(t), HASH32_BUF_INIT);
171 	hash = hash32_buf(&l, sizeof(l), hash);
172 
173 	return (hash & SCSIPI_CHAN_PERIPH_HASHMASK);
174 }
175 
176 /*
177  * scsipi_insert_periph:
178  *
179  *	Insert a periph into the channel.
180  */
181 void
182 scsipi_insert_periph(struct scsipi_channel *chan, struct scsipi_periph *periph)
183 {
184 	uint32_t hash;
185 	int s;
186 
187 	hash = scsipi_chan_periph_hash(periph->periph_target,
188 	    periph->periph_lun);
189 
190 	s = splbio();
191 	LIST_INSERT_HEAD(&chan->chan_periphtab[hash], periph, periph_hash);
192 	splx(s);
193 }
194 
195 /*
196  * scsipi_remove_periph:
197  *
198  *	Remove a periph from the channel.
199  */
200 void
201 scsipi_remove_periph(struct scsipi_channel *chan,
202     struct scsipi_periph *periph)
203 {
204 	int s;
205 
206 	s = splbio();
207 	LIST_REMOVE(periph, periph_hash);
208 	splx(s);
209 }
210 
211 /*
212  * scsipi_lookup_periph:
213  *
214  *	Lookup a periph on the specified channel.
215  */
216 struct scsipi_periph *
217 scsipi_lookup_periph(struct scsipi_channel *chan, int target, int lun)
218 {
219 	struct scsipi_periph *periph;
220 	uint32_t hash;
221 	int s;
222 
223 	if (target >= chan->chan_ntargets ||
224 	    lun >= chan->chan_nluns)
225 		return (NULL);
226 
227 	hash = scsipi_chan_periph_hash(target, lun);
228 
229 	s = splbio();
230 	LIST_FOREACH(periph, &chan->chan_periphtab[hash], periph_hash) {
231 		if (periph->periph_target == target &&
232 		    periph->periph_lun == lun)
233 			break;
234 	}
235 	splx(s);
236 
237 	return (periph);
238 }
239 
240 /*
241  * scsipi_get_resource:
242  *
243  *	Allocate a single xfer `resource' from the channel.
244  *
245  *	NOTE: Must be called at splbio().
246  */
247 static int
248 scsipi_get_resource(struct scsipi_channel *chan)
249 {
250 	struct scsipi_adapter *adapt = chan->chan_adapter;
251 
252 	if (chan->chan_flags & SCSIPI_CHAN_OPENINGS) {
253 		if (chan->chan_openings > 0) {
254 			chan->chan_openings--;
255 			return (1);
256 		}
257 		return (0);
258 	}
259 
260 	if (adapt->adapt_openings > 0) {
261 		adapt->adapt_openings--;
262 		return (1);
263 	}
264 	return (0);
265 }
266 
267 /*
268  * scsipi_grow_resources:
269  *
270  *	Attempt to grow resources for a channel.  If this succeeds,
271  *	we allocate one for our caller.
272  *
273  *	NOTE: Must be called at splbio().
274  */
275 static inline int
276 scsipi_grow_resources(struct scsipi_channel *chan)
277 {
278 
279 	if (chan->chan_flags & SCSIPI_CHAN_CANGROW) {
280 		if ((chan->chan_flags & SCSIPI_CHAN_TACTIVE) == 0) {
281 			scsipi_adapter_request(chan,
282 			    ADAPTER_REQ_GROW_RESOURCES, NULL);
283 			return (scsipi_get_resource(chan));
284 		}
285 		/*
286 		 * Ask the channel thread to do it.  It'll have to thaw the
287 		 * queue.
288 		 */
289 		scsipi_channel_freeze(chan, 1);
290 		chan->chan_tflags |= SCSIPI_CHANT_GROWRES;
291 		wakeup(&chan->chan_complete);
292 		return (0);
293 	}
294 
295 	return (0);
296 }
297 
298 /*
299  * scsipi_put_resource:
300  *
301  *	Free a single xfer `resource' to the channel.
302  *
303  *	NOTE: Must be called at splbio().
304  */
305 static void
306 scsipi_put_resource(struct scsipi_channel *chan)
307 {
308 	struct scsipi_adapter *adapt = chan->chan_adapter;
309 
310 	if (chan->chan_flags & SCSIPI_CHAN_OPENINGS)
311 		chan->chan_openings++;
312 	else
313 		adapt->adapt_openings++;
314 }
315 
316 /*
317  * scsipi_get_tag:
318  *
319  *	Get a tag ID for the specified xfer.
320  *
321  *	NOTE: Must be called at splbio().
322  */
323 static void
324 scsipi_get_tag(struct scsipi_xfer *xs)
325 {
326 	struct scsipi_periph *periph = xs->xs_periph;
327 	int bit, tag;
328 	u_int word;
329 
330 	bit = 0;	/* XXX gcc */
331 	for (word = 0; word < PERIPH_NTAGWORDS; word++) {
332 		bit = ffs(periph->periph_freetags[word]);
333 		if (bit != 0)
334 			break;
335 	}
336 #ifdef DIAGNOSTIC
337 	if (word == PERIPH_NTAGWORDS) {
338 		scsipi_printaddr(periph);
339 		printf("no free tags\n");
340 		panic("scsipi_get_tag");
341 	}
342 #endif
343 
344 	bit -= 1;
345 	periph->periph_freetags[word] &= ~(1 << bit);
346 	tag = (word << 5) | bit;
347 
348 	/* XXX Should eventually disallow this completely. */
349 	if (tag >= periph->periph_openings) {
350 		scsipi_printaddr(periph);
351 		printf("WARNING: tag %d greater than available openings %d\n",
352 		    tag, periph->periph_openings);
353 	}
354 
355 	xs->xs_tag_id = tag;
356 }
357 
358 /*
359  * scsipi_put_tag:
360  *
361  *	Put the tag ID for the specified xfer back into the pool.
362  *
363  *	NOTE: Must be called at splbio().
364  */
365 static void
366 scsipi_put_tag(struct scsipi_xfer *xs)
367 {
368 	struct scsipi_periph *periph = xs->xs_periph;
369 	int word, bit;
370 
371 	word = xs->xs_tag_id >> 5;
372 	bit = xs->xs_tag_id & 0x1f;
373 
374 	periph->periph_freetags[word] |= (1 << bit);
375 }
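
/*
 * Worked example of the tag encoding used above (illustrative): each
 * word of periph_freetags covers 32 tag IDs, so tag 37 decodes as
 * word = 37 >> 5 = 1 and bit = 37 & 0x1f = 5, and scsipi_get_tag()
 * builds the same ID back up as (1 << 5) | 5 = 37.
 */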
376 
377 /*
378  * scsipi_get_xs:
379  *
380  *	Allocate an xfer descriptor and associate it with the
381  *	specified peripherial.  If the peripherial has no more
382  *	available command openings, we either block waiting for
383  *	one to become available, or fail.
384  */
385 struct scsipi_xfer *
386 scsipi_get_xs(struct scsipi_periph *periph, int flags)
387 {
388 	struct scsipi_xfer *xs;
389 	int s;
390 
391 	SC_DEBUG(periph, SCSIPI_DB3, ("scsipi_get_xs\n"));
392 
393 	KASSERT(!cold);
394 
395 #ifdef DIAGNOSTIC
396 	/*
397 	 * URGENT commands can never be ASYNC.
398 	 */
399 	if ((flags & (XS_CTL_URGENT|XS_CTL_ASYNC)) ==
400 	    (XS_CTL_URGENT|XS_CTL_ASYNC)) {
401 		scsipi_printaddr(periph);
402 		printf("URGENT and ASYNC\n");
403 		panic("scsipi_get_xs");
404 	}
405 #endif
406 
407 	s = splbio();
408 	/*
409 	 * Wait for a command opening to become available.  Rules:
410 	 *
411 	 *	- All xfers must wait for an available opening.
412 	 *	  Exception: URGENT xfers can proceed when
413 	 *	  active == openings, because we use the opening
414 	 *	  of the command we're recovering for.
415 	 *	- If the periph has sense pending, only URGENT & REQSENSE
416 	 *	  xfers may proceed.
417 	 *
418 	 *	- If the periph is recovering, only URGENT xfers may
419 	 *	  proceed.
420 	 *
421 	 *	- If the periph is currently executing a recovery
422 	 *	  command, URGENT commands must block, because only
423 	 *	  one recovery command can execute at a time.
424 	 */
425 	for (;;) {
426 		if (flags & XS_CTL_URGENT) {
427 			if (periph->periph_active > periph->periph_openings)
428 				goto wait_for_opening;
429 			if (periph->periph_flags & PERIPH_SENSE) {
430 				if ((flags & XS_CTL_REQSENSE) == 0)
431 					goto wait_for_opening;
432 			} else {
433 				if ((periph->periph_flags &
434 				    PERIPH_RECOVERY_ACTIVE) != 0)
435 					goto wait_for_opening;
436 				periph->periph_flags |= PERIPH_RECOVERY_ACTIVE;
437 			}
438 			break;
439 		}
440 		if (periph->periph_active >= periph->periph_openings ||
441 		    (periph->periph_flags & PERIPH_RECOVERING) != 0)
442 			goto wait_for_opening;
443 		periph->periph_active++;
444 		break;
445 
446  wait_for_opening:
447 		if (flags & XS_CTL_NOSLEEP) {
448 			splx(s);
449 			return (NULL);
450 		}
451 		SC_DEBUG(periph, SCSIPI_DB3, ("sleeping\n"));
452 		periph->periph_flags |= PERIPH_WAITING;
453 		(void) tsleep(periph, PRIBIO, "getxs", 0);
454 	}
455 	SC_DEBUG(periph, SCSIPI_DB3, ("calling pool_get\n"));
456 	xs = pool_get(&scsipi_xfer_pool,
457 	    ((flags & XS_CTL_NOSLEEP) != 0 ? PR_NOWAIT : PR_WAITOK));
458 	if (xs == NULL) {
459 		if (flags & XS_CTL_URGENT) {
460 			if ((flags & XS_CTL_REQSENSE) == 0)
461 				periph->periph_flags &= ~PERIPH_RECOVERY_ACTIVE;
462 		} else
463 			periph->periph_active--;
464 		scsipi_printaddr(periph);
465 		printf("unable to allocate %sscsipi_xfer\n",
466 		    (flags & XS_CTL_URGENT) ? "URGENT " : "");
467 	}
468 	splx(s);
469 
470 	SC_DEBUG(periph, SCSIPI_DB3, ("returning\n"));
471 
472 	if (xs != NULL) {
473 		memset(xs, 0, sizeof(*xs));
474 		callout_init(&xs->xs_callout, 0);
475 		xs->xs_periph = periph;
476 		xs->xs_control = flags;
477 		xs->xs_status = 0;
478 		s = splbio();
479 		TAILQ_INSERT_TAIL(&periph->periph_xferq, xs, device_q);
480 		splx(s);
481 	}
482 	return (xs);
483 }
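
/*
 * A minimal usage sketch (illustrative; most callers instead go through
 * scsipi_command(), which manages this pairing internally):
 *
 *	struct scsipi_xfer *xs;
 *	int s;
 *
 *	xs = scsipi_get_xs(periph, XS_CTL_NOSLEEP);
 *	if (xs == NULL)
 *		return;			(no opening, or pool exhausted)
 *	(fill in xs->cmd, xs->cmdlen, xs->data, xs->timeout, ...)
 *	s = splbio();
 *	scsipi_put_xs(xs);		(must be called at splbio())
 *	splx(s);
 */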
484 
485 /*
486  * scsipi_put_xs:
487  *
488  *	Release an xfer descriptor, decreasing the outstanding command
489  *	count for the peripheral.  If there is a thread waiting for
490  *	an opening, wake it up.  If not, kick any queued I/O the
491  *	peripheral may have.
492  *
493  *	NOTE: Must be called at splbio().
494  */
495 void
496 scsipi_put_xs(struct scsipi_xfer *xs)
497 {
498 	struct scsipi_periph *periph = xs->xs_periph;
499 	int flags = xs->xs_control;
500 
501 	SC_DEBUG(periph, SCSIPI_DB3, ("scsipi_free_xs\n"));
502 
503 	TAILQ_REMOVE(&periph->periph_xferq, xs, device_q);
504 	callout_destroy(&xs->xs_callout);
505 	pool_put(&scsipi_xfer_pool, xs);
506 
507 #ifdef DIAGNOSTIC
508 	if ((periph->periph_flags & PERIPH_RECOVERY_ACTIVE) != 0 &&
509 	    periph->periph_active == 0) {
510 		scsipi_printaddr(periph);
511 		printf("recovery without a command to recovery for\n");
512 		panic("scsipi_put_xs");
513 	}
514 #endif
515 
516 	if (flags & XS_CTL_URGENT) {
517 		if ((flags & XS_CTL_REQSENSE) == 0)
518 			periph->periph_flags &= ~PERIPH_RECOVERY_ACTIVE;
519 	} else
520 		periph->periph_active--;
521 	if (periph->periph_active == 0 &&
522 	    (periph->periph_flags & PERIPH_WAITDRAIN) != 0) {
523 		periph->periph_flags &= ~PERIPH_WAITDRAIN;
524 		wakeup(&periph->periph_active);
525 	}
526 
527 	if (periph->periph_flags & PERIPH_WAITING) {
528 		periph->periph_flags &= ~PERIPH_WAITING;
529 		wakeup(periph);
530 	} else {
531 		if (periph->periph_switch->psw_start != NULL &&
532 		    device_is_active(periph->periph_dev)) {
533 			SC_DEBUG(periph, SCSIPI_DB2,
534 			    ("calling private start()\n"));
535 			(*periph->periph_switch->psw_start)(periph);
536 		}
537 	}
538 }
539 
540 /*
541  * scsipi_channel_freeze:
542  *
543  *	Freeze a channel's xfer queue.
544  */
545 void
546 scsipi_channel_freeze(struct scsipi_channel *chan, int count)
547 {
548 	int s;
549 
550 	s = splbio();
551 	chan->chan_qfreeze += count;
552 	splx(s);
553 }
554 
555 /*
556  * scsipi_channel_thaw:
557  *
558  *	Thaw a channel's xfer queue.
559  */
560 void
561 scsipi_channel_thaw(struct scsipi_channel *chan, int count)
562 {
563 	int s;
564 
565 	s = splbio();
566 	chan->chan_qfreeze -= count;
567 	/*
568 	 * Don't let the freeze count go negative.
569 	 *
570 	 * Presumably the adapter driver could keep track of this,
571 	 * but it might just be easier to do this here so as to allow
572 	 * multiple callers, including those outside the adapter driver.
573 	 */
574 	if (chan->chan_qfreeze < 0) {
575 		chan->chan_qfreeze = 0;
576 	}
577 	splx(s);
578 	/*
579 	 * Kick the channel's queue here.  Note, we may be running in
580 	 * interrupt context (softclock or HBA's interrupt), so the adapter
581 	 * driver had better not sleep.
582 	 */
583 	if (chan->chan_qfreeze == 0)
584 		scsipi_run_queue(chan);
585 }
586 
587 /*
588  * scsipi_channel_timed_thaw:
589  *
590  *	Thaw a channel after some time has expired. This will also
591  * 	run the channel's queue if the freeze count has reached 0.
592  */
593 void
594 scsipi_channel_timed_thaw(void *arg)
595 {
596 	struct scsipi_channel *chan = arg;
597 
598 	scsipi_channel_thaw(chan, 1);
599 }
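
/*
 * Typical (illustrative) use of the freeze/thaw pair from a driver that
 * needs to hold off a channel for a while; sc_callout here is a
 * hypothetical callout in the driver's softc:
 *
 *	scsipi_channel_freeze(chan, 1);
 *	callout_reset(&sc->sc_callout, hz,
 *	    scsipi_channel_timed_thaw, chan);
 *
 * Freezes nest, so each one must eventually be matched by exactly one
 * thaw; scsipi_channel_timed_thaw() supplies that thaw when the
 * callout fires.
 */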
600 
601 /*
602  * scsipi_periph_freeze:
603  *
604  *	Freeze a device's xfer queue.
605  */
606 void
607 scsipi_periph_freeze(struct scsipi_periph *periph, int count)
608 {
609 	int s;
610 
611 	s = splbio();
612 	periph->periph_qfreeze += count;
613 	splx(s);
614 }
615 
616 /*
617  * scsipi_periph_thaw:
618  *
619  *	Thaw a device's xfer queue.
620  */
621 void
622 scsipi_periph_thaw(struct scsipi_periph *periph, int count)
623 {
624 	int s;
625 
626 	s = splbio();
627 	periph->periph_qfreeze -= count;
628 #ifdef DIAGNOSTIC
629 	if (periph->periph_qfreeze < 0) {
630 		static const char pc[] = "periph freeze count < 0";
631 		scsipi_printaddr(periph);
632 		printf("%s\n", pc);
633 		panic(pc);
634 	}
635 #endif
636 	if (periph->periph_qfreeze == 0 &&
637 	    (periph->periph_flags & PERIPH_WAITING) != 0)
638 		wakeup(periph);
639 	splx(s);
640 }
641 
642 /*
643  * scsipi_periph_timed_thaw:
644  *
645  *	Thaw a device after some time has expired.
646  */
647 void
648 scsipi_periph_timed_thaw(void *arg)
649 {
650 	int s;
651 	struct scsipi_periph *periph = arg;
652 
653 	callout_stop(&periph->periph_callout);
654 
655 	s = splbio();
656 	scsipi_periph_thaw(periph, 1);
657 	if ((periph->periph_channel->chan_flags & SCSIPI_CHAN_TACTIVE) == 0) {
658 		/*
659 		 * Kick the channel's queue here.  Note, we're running in
660 		 * interrupt context (softclock), so the adapter driver
661 		 * had better not sleep.
662 		 */
663 		scsipi_run_queue(periph->periph_channel);
664 	} else {
665 		/*
666 		 * Tell the completion thread to kick the channel's queue here.
667 		 */
668 		periph->periph_channel->chan_tflags |= SCSIPI_CHANT_KICK;
669 		wakeup(&periph->periph_channel->chan_complete);
670 	}
671 	splx(s);
672 }
673 
674 /*
675  * scsipi_wait_drain:
676  *
677  *	Wait for a periph's pending xfers to drain.
678  */
679 void
680 scsipi_wait_drain(struct scsipi_periph *periph)
681 {
682 	int s;
683 
684 	s = splbio();
685 	while (periph->periph_active != 0) {
686 		periph->periph_flags |= PERIPH_WAITDRAIN;
687 		(void) tsleep(&periph->periph_active, PRIBIO, "sxdrn", 0);
688 	}
689 	splx(s);
690 }
691 
692 /*
693  * scsipi_kill_pending:
694  *
695  *	Kill off all pending xfers for a periph.
696  *
697  *	NOTE: Must be called at splbio().
698  */
699 void
700 scsipi_kill_pending(struct scsipi_periph *periph)
701 {
702 
703 	(*periph->periph_channel->chan_bustype->bustype_kill_pending)(periph);
704 	scsipi_wait_drain(periph);
705 }
706 
707 /*
708  * scsipi_print_cdb:
709  * prints a command descriptor block (for debugging purposes, error messages,
710  * SCSIPI_VERBOSE, ...)
711  */
712 void
713 scsipi_print_cdb(struct scsipi_generic *cmd)
714 {
715 	int i, j;
716 
717  	printf("0x%02x", cmd->opcode);
718 
719  	switch (CDB_GROUPID(cmd->opcode)) {
720  	case CDB_GROUPID_0:
721  		j = CDB_GROUP0;
722  		break;
723  	case CDB_GROUPID_1:
724  		j = CDB_GROUP1;
725  		break;
726  	case CDB_GROUPID_2:
727  		j = CDB_GROUP2;
728  		break;
729  	case CDB_GROUPID_3:
730  		j = CDB_GROUP3;
731  		break;
732  	case CDB_GROUPID_4:
733  		j = CDB_GROUP4;
734  		break;
735  	case CDB_GROUPID_5:
736  		j = CDB_GROUP5;
737  		break;
738  	case CDB_GROUPID_6:
739  		j = CDB_GROUP6;
740  		break;
741  	case CDB_GROUPID_7:
742  		j = CDB_GROUP7;
743  		break;
744  	default:
745  		j = 0;
746  	}
747  	if (j == 0)
748  		j = sizeof (cmd->bytes);
749  	for (i = 0; i < j-1; i++) /* already done the opcode */
750  		printf(" %02x", cmd->bytes[i]);
751 }
752 
753 /*
754  * scsipi_interpret_sense:
755  *
756  *	Look at the returned sense and act on the error, determining
757  *	the unix error number to pass back.  (0 = report no error)
758  *
759  *	NOTE: If we return ERESTART, we are expected to have
760  *	thawed the device!
761  *
762  *	THIS IS THE DEFAULT ERROR HANDLER FOR SCSI DEVICES.
763  */
764 int
765 scsipi_interpret_sense(struct scsipi_xfer *xs)
766 {
767 	struct scsi_sense_data *sense;
768 	struct scsipi_periph *periph = xs->xs_periph;
769 	u_int8_t key;
770 	int error;
771 #ifndef	SCSIVERBOSE
772 	u_int32_t info;
773 	static const char *error_mes[] = {
774 		"soft error (corrected)",
775 		"not ready", "medium error",
776 		"non-media hardware failure", "illegal request",
777 		"unit attention", "readonly device",
778 		"no data found", "vendor unique",
779 		"copy aborted", "command aborted",
780 		"search returned equal", "volume overflow",
781 		"verify miscompare", "unknown error key"
782 	};
783 #endif
784 
785 	sense = &xs->sense.scsi_sense;
786 #ifdef SCSIPI_DEBUG
787 	if (periph->periph_flags & SCSIPI_DB1) {
788 		int count;
789 		scsipi_printaddr(periph);
790 		printf(" sense debug information:\n");
791 		printf("\tcode 0x%x valid %d\n",
792 			SSD_RCODE(sense->response_code),
793 			sense->response_code & SSD_RCODE_VALID ? 1 : 0);
794 		printf("\tseg 0x%x key 0x%x ili 0x%x eom 0x%x fmark 0x%x\n",
795 			sense->segment,
796 			SSD_SENSE_KEY(sense->flags),
797 			sense->flags & SSD_ILI ? 1 : 0,
798 			sense->flags & SSD_EOM ? 1 : 0,
799 			sense->flags & SSD_FILEMARK ? 1 : 0);
800 		printf("\ninfo: 0x%x 0x%x 0x%x 0x%x followed by %d "
801 			"extra bytes\n",
802 			sense->info[0],
803 			sense->info[1],
804 			sense->info[2],
805 			sense->info[3],
806 			sense->extra_len);
807 		printf("\textra: ");
808 		for (count = 0; count < SSD_ADD_BYTES_LIM(sense); count++)
809 			printf("0x%x ", sense->csi[count]);
810 		printf("\n");
811 	}
812 #endif
813 
814 	/*
815 	 * If the periph has its own error handler, call it first.
816 	 * If it returns a legit error value, return that, otherwise
817 	 * it wants us to continue with normal error processing.
818 	 */
819 	if (periph->periph_switch->psw_error != NULL) {
820 		SC_DEBUG(periph, SCSIPI_DB2,
821 		    ("calling private err_handler()\n"));
822 		error = (*periph->periph_switch->psw_error)(xs);
823 		if (error != EJUSTRETURN)
824 			return (error);
825 	}
826 	/* otherwise use the default */
827 	switch (SSD_RCODE(sense->response_code)) {
828 
829 		/*
830 		 * Old SCSI-1 and SASI devices respond with
831 		 * codes other than 70.
832 		 */
833 	case 0x00:		/* no error (command completed OK) */
834 		return (0);
835 	case 0x04:		/* drive not ready after it was selected */
836 		if ((periph->periph_flags & PERIPH_REMOVABLE) != 0)
837 			periph->periph_flags &= ~PERIPH_MEDIA_LOADED;
838 		if ((xs->xs_control & XS_CTL_IGNORE_NOT_READY) != 0)
839 			return (0);
840 		/* XXX - display some sort of error here? */
841 		return (EIO);
842 	case 0x20:		/* invalid command */
843 		if ((xs->xs_control &
844 		     XS_CTL_IGNORE_ILLEGAL_REQUEST) != 0)
845 			return (0);
846 		return (EINVAL);
847 	case 0x25:		/* invalid LUN (Adaptec ACB-4000) */
848 		return (EACCES);
849 
850 		/*
851 		 * If it's code 70, use the extended stuff and
852 		 * interpret the key
853 		 */
854 	case 0x71:		/* delayed error */
855 		scsipi_printaddr(periph);
856 		key = SSD_SENSE_KEY(sense->flags);
857 		printf(" DEFERRED ERROR, key = 0x%x\n", key);
858 		/* FALLTHROUGH */
859 	case 0x70:
860 #ifndef	SCSIVERBOSE
861 		if ((sense->response_code & SSD_RCODE_VALID) != 0)
862 			info = _4btol(sense->info);
863 		else
864 			info = 0;
865 #endif
866 		key = SSD_SENSE_KEY(sense->flags);
867 
868 		switch (key) {
869 		case SKEY_NO_SENSE:
870 		case SKEY_RECOVERED_ERROR:
871 			if (xs->resid == xs->datalen && xs->datalen) {
872 				/*
873 				 * Why is this here?
874 				 */
875 				xs->resid = 0;	/* not short read */
876 			}
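			/* FALLTHROUGH */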
877 		case SKEY_EQUAL:
878 			error = 0;
879 			break;
880 		case SKEY_NOT_READY:
881 			if ((periph->periph_flags & PERIPH_REMOVABLE) != 0)
882 				periph->periph_flags &= ~PERIPH_MEDIA_LOADED;
883 			if ((xs->xs_control & XS_CTL_IGNORE_NOT_READY) != 0)
884 				return (0);
885 			if (sense->asc == 0x3A) {
886 				error = ENODEV; /* Medium not present */
887 				if (xs->xs_control & XS_CTL_SILENT_NODEV)
888 					return (error);
889 			} else
890 				error = EIO;
891 			if ((xs->xs_control & XS_CTL_SILENT) != 0)
892 				return (error);
893 			break;
894 		case SKEY_ILLEGAL_REQUEST:
895 			if ((xs->xs_control &
896 			     XS_CTL_IGNORE_ILLEGAL_REQUEST) != 0)
897 				return (0);
898 			/*
899 			 * Handle the case where a device reports
900 			 * Logical Unit Not Supported during discovery.
901 			 */
902 			if ((xs->xs_control & XS_CTL_DISCOVERY) != 0 &&
903 			    sense->asc == 0x25 &&
904 			    sense->ascq == 0x00)
905 				return (EINVAL);
906 			if ((xs->xs_control & XS_CTL_SILENT) != 0)
907 				return (EIO);
908 			error = EINVAL;
909 			break;
910 		case SKEY_UNIT_ATTENTION:
911 			if (sense->asc == 0x29 &&
912 			    sense->ascq == 0x00) {
913 				/* device or bus reset */
914 				return (ERESTART);
915 			}
916 			if ((periph->periph_flags & PERIPH_REMOVABLE) != 0)
917 				periph->periph_flags &= ~PERIPH_MEDIA_LOADED;
918 			if ((xs->xs_control &
919 			     XS_CTL_IGNORE_MEDIA_CHANGE) != 0 ||
920 				/* XXX Should reupload any transient state. */
921 				(periph->periph_flags &
922 				 PERIPH_REMOVABLE) == 0) {
923 				return (ERESTART);
924 			}
925 			if ((xs->xs_control & XS_CTL_SILENT) != 0)
926 				return (EIO);
927 			error = EIO;
928 			break;
929 		case SKEY_DATA_PROTECT:
930 			error = EROFS;
931 			break;
932 		case SKEY_BLANK_CHECK:
933 			error = 0;
934 			break;
935 		case SKEY_ABORTED_COMMAND:
936 			if (xs->xs_retries != 0) {
937 				xs->xs_retries--;
938 				error = ERESTART;
939 			} else
940 				error = EIO;
941 			break;
942 		case SKEY_VOLUME_OVERFLOW:
943 			error = ENOSPC;
944 			break;
945 		default:
946 			error = EIO;
947 			break;
948 		}
949 
950 #ifdef SCSIVERBOSE
951 		if (key && (xs->xs_control & XS_CTL_SILENT) == 0)
952 			scsipi_print_sense(xs, 0);
953 #else
954 		if (key) {
955 			scsipi_printaddr(periph);
956 			printf("%s", error_mes[key - 1]);
957 			if ((sense->response_code & SSD_RCODE_VALID) != 0) {
958 				switch (key) {
959 				case SKEY_NOT_READY:
960 				case SKEY_ILLEGAL_REQUEST:
961 				case SKEY_UNIT_ATTENTION:
962 				case SKEY_DATA_PROTECT:
963 					break;
964 				case SKEY_BLANK_CHECK:
965 					printf(", requested size: %d (decimal)",
966 					    info);
967 					break;
968 				case SKEY_ABORTED_COMMAND:
969 					if (xs->xs_retries)
970 						printf(", retrying");
971 					printf(", cmd 0x%x, info 0x%x",
972 					    xs->cmd->opcode, info);
973 					break;
974 				default:
975 					printf(", info = %d (decimal)", info);
976 				}
977 			}
978 			if (sense->extra_len != 0) {
979 				int n;
980 				printf(", data =");
981 				for (n = 0; n < sense->extra_len; n++)
982 					printf(" %02x",
983 					    sense->csi[n]);
984 			}
985 			printf("\n");
986 		}
987 #endif
988 		return (error);
989 
990 	/*
991 	 * Some other code, just report it
992 	 */
993 	default:
994 #if    defined(SCSIDEBUG) || defined(DEBUG)
995 	{
996 		static const char *uc = "undecodable sense error";
997 		int i;
998 		u_int8_t *cptr = (u_int8_t *) sense;
999 		scsipi_printaddr(periph);
1000 		if (xs->cmd == &xs->cmdstore) {
1001 			printf("%s for opcode 0x%x, data=",
1002 			    uc, xs->cmdstore.opcode);
1003 		} else {
1004 			printf("%s, data=", uc);
1005 		}
1006 		for (i = 0; i < sizeof (*sense); i++)
1007 			printf(" 0x%02x", *(cptr++) & 0xff);
1008 		printf("\n");
1009 	}
1010 #else
1011 		scsipi_printaddr(periph);
1012 		printf("Sense Error Code 0x%x",
1013 			SSD_RCODE(sense->response_code));
1014 		if ((sense->response_code & SSD_RCODE_VALID) != 0) {
1015 			struct scsi_sense_data_unextended *usense =
1016 			    (struct scsi_sense_data_unextended *)sense;
1017 			printf(" at block no. %d (decimal)",
1018 			    _3btol(usense->block));
1019 		}
1020 		printf("\n");
1021 #endif
1022 		return (EIO);
1023 	}
1024 }
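
/*
 * Sketch of a periph-private error handler cooperating with the default
 * handler above (hypothetical driver function, installed via the
 * psw_error hook):
 *
 *	static int
 *	foo_error_handler(struct scsipi_xfer *xs)
 *	{
 *		struct scsi_sense_data *sense = &xs->sense.scsi_sense;
 *
 *		if (SSD_SENSE_KEY(sense->flags) == SKEY_RECOVERED_ERROR)
 *			return (0);	(handled: report success)
 *		return (EJUSTRETURN);	(defer to the default handler)
 *	}
 */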
1025 
1026 /*
1027  * scsipi_test_unit_ready:
1028  *
1029  *	Issue a `test unit ready' request.
1030  */
1031 int
1032 scsipi_test_unit_ready(struct scsipi_periph *periph, int flags)
1033 {
1034 	struct scsi_test_unit_ready cmd;
1035 	int retries;
1036 
1037 	/* some ATAPI drives don't support TEST UNIT READY. Sigh */
1038 	if (periph->periph_quirks & PQUIRK_NOTUR)
1039 		return (0);
1040 
1041 	if (flags & XS_CTL_DISCOVERY)
1042 		retries = 0;
1043 	else
1044 		retries = SCSIPIRETRIES;
1045 
1046 	memset(&cmd, 0, sizeof(cmd));
1047 	cmd.opcode = SCSI_TEST_UNIT_READY;
1048 
1049 	return (scsipi_command(periph, (void *)&cmd, sizeof(cmd), 0, 0,
1050 	    retries, 10000, NULL, flags));
1051 }
1052 
1053 /*
1054  * scsipi_inquire:
1055  *
1056  *	Ask the device about itself.
1057  */
1058 int
1059 scsipi_inquire(struct scsipi_periph *periph, struct scsipi_inquiry_data *inqbuf,
1060     int flags)
1061 {
1062 	struct scsipi_inquiry cmd;
1063 	int error;
1064 	int retries;
1065 
1066 	if (flags & XS_CTL_DISCOVERY)
1067 		retries = 0;
1068 	else
1069 		retries = SCSIPIRETRIES;
1070 
1071 	/*
1072 	 * If we request more data than the device can provide, it SHOULD just
1073 	 * return a short response.  However, some devices error with an
1074 	 * ILLEGAL REQUEST sense code, and yet others have even more special
1075 	 * failure modes (such as the GL641USB flash adapter, which goes loony
1076 	 * and sends corrupted CRCs).  To work around this, and to bring our
1077 	 * behavior more in line with other OSes, we do a shorter inquiry,
1078 	 * covering all the SCSI-2 information, first, and then request more
1079 	 * data iff the "additional length" field indicates there is more.
1080 	 * - mycroft, 2003/10/16
1081 	 */
1082 	memset(&cmd, 0, sizeof(cmd));
1083 	cmd.opcode = INQUIRY;
1084 	cmd.length = SCSIPI_INQUIRY_LENGTH_SCSI2;
1085 	error = scsipi_command(periph, (void *)&cmd, sizeof(cmd),
1086 	    (void *)inqbuf, SCSIPI_INQUIRY_LENGTH_SCSI2, retries,
1087 	    10000, NULL, flags | XS_CTL_DATA_IN);
1088 	if (!error &&
1089 	    inqbuf->additional_length > SCSIPI_INQUIRY_LENGTH_SCSI2 - 4) {
1090 #if 0
1091 printf("inquire: addlen=%d, retrying\n", inqbuf->additional_length);
1092 #endif
1093 		cmd.length = SCSIPI_INQUIRY_LENGTH_SCSI3;
1094 		error = scsipi_command(periph, (void *)&cmd, sizeof(cmd),
1095 		    (void *)inqbuf, SCSIPI_INQUIRY_LENGTH_SCSI3, retries,
1096 		    10000, NULL, flags | XS_CTL_DATA_IN);
1097 #if 0
1098 printf("inquire: error=%d\n", error);
1099 #endif
1100 	}
1101 
1102 #ifdef SCSI_OLD_NOINQUIRY
1103 	/*
1104 	 * Kludge for the Adaptec ACB-4000 SCSI->MFM translator.
1105 	 * This board doesn't support the INQUIRY command at all.
1106 	 */
1107 	if (error == EINVAL || error == EACCES) {
1108 		/*
1109 		 * Conjure up an INQUIRY response.
1110 		 */
1111 		inqbuf->device = (error == EINVAL ?
1112 			 SID_QUAL_LU_PRESENT :
1113 			 SID_QUAL_LU_NOTPRESENT) | T_DIRECT;
1114 		inqbuf->dev_qual2 = 0;
1115 		inqbuf->version = 0;
1116 		inqbuf->response_format = SID_FORMAT_SCSI1;
1117 		inqbuf->additional_length = SCSIPI_INQUIRY_LENGTH_SCSI2 - 4;
1118 		inqbuf->flags1 = inqbuf->flags2 = inqbuf->flags3 = 0;
1119 		memcpy(inqbuf->vendor, "ADAPTEC ACB-4000            ", 28);
1120 		error = 0;
1121 	}
1122 
1123 	/*
1124 	 * Kludge for the Emulex MT-02 SCSI->QIC translator.
1125 	 * This board gives an empty response to an INQUIRY command.
1126 	 */
1127 	else if (error == 0 &&
1128 	    inqbuf->device == (SID_QUAL_LU_PRESENT | T_DIRECT) &&
1129 	    inqbuf->dev_qual2 == 0 &&
1130 	    inqbuf->version == 0 &&
1131 	    inqbuf->response_format == SID_FORMAT_SCSI1) {
1132 		/*
1133 		 * Fill out the INQUIRY response.
1134 		 */
1135 		inqbuf->device = (SID_QUAL_LU_PRESENT | T_SEQUENTIAL);
1136 		inqbuf->dev_qual2 = SID_REMOVABLE;
1137 		inqbuf->additional_length = SCSIPI_INQUIRY_LENGTH_SCSI2 - 4;
1138 		inqbuf->flags1 = inqbuf->flags2 = inqbuf->flags3 = 0;
1139 		memcpy(inqbuf->vendor, "EMULEX  MT-02 QIC           ", 28);
1140 	}
1141 #endif /* SCSI_OLD_NOINQUIRY */
1142 
1143 	return error;
1144 }
1145 
1146 /*
1147  * scsipi_prevent:
1148  *
1149  *	Prevent or allow the user to remove the media
1150  */
1151 int
1152 scsipi_prevent(struct scsipi_periph *periph, int type, int flags)
1153 {
1154 	struct scsi_prevent_allow_medium_removal cmd;
1155 
1156 	if (periph->periph_quirks & PQUIRK_NODOORLOCK)
1157 		return 0;
1158 
1159 	memset(&cmd, 0, sizeof(cmd));
1160 	cmd.opcode = SCSI_PREVENT_ALLOW_MEDIUM_REMOVAL;
1161 	cmd.how = type;
1162 
1163 	return (scsipi_command(periph, (void *)&cmd, sizeof(cmd), 0, 0,
1164 	    SCSIPIRETRIES, 5000, NULL, flags));
1165 }
1166 
1167 /*
1168  * scsipi_start:
1169  *
1170  *	Send a START UNIT.
1171  */
1172 int
1173 scsipi_start(struct scsipi_periph *periph, int type, int flags)
1174 {
1175 	struct scsipi_start_stop cmd;
1176 
1177 	memset(&cmd, 0, sizeof(cmd));
1178 	cmd.opcode = START_STOP;
1179 	cmd.byte2 = 0x00;
1180 	cmd.how = type;
1181 
1182 	return (scsipi_command(periph, (void *)&cmd, sizeof(cmd), 0, 0,
1183 	    SCSIPIRETRIES, (type & SSS_START) ? 60000 : 10000, NULL, flags));
1184 }
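
/*
 * Illustrative call from a disk driver spinning up a unit (SSS_START is
 * the standard start bit in the START STOP UNIT CDB):
 *
 *	error = scsipi_start(periph, SSS_START, XS_CTL_SILENT);
 *
 * The 60 second timeout chosen above for starts reflects how long some
 * drives take to spin up.
 */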
1185 
1186 /*
1187  * scsipi_mode_sense, scsipi_mode_sense_big:
1188  *	get a sense page from a device
1189  */
1190 
1191 int
1192 scsipi_mode_sense(struct scsipi_periph *periph, int byte2, int page,
1193     struct scsi_mode_parameter_header_6 *data, int len, int flags, int retries,
1194     int timeout)
1195 {
1196 	struct scsi_mode_sense_6 cmd;
1197 
1198 	memset(&cmd, 0, sizeof(cmd));
1199 	cmd.opcode = SCSI_MODE_SENSE_6;
1200 	cmd.byte2 = byte2;
1201 	cmd.page = page;
1202 	cmd.length = len & 0xff;
1203 
1204 	return (scsipi_command(periph, (void *)&cmd, sizeof(cmd),
1205 	    (void *)data, len, retries, timeout, NULL, flags | XS_CTL_DATA_IN));
1206 }
1207 
1208 int
1209 scsipi_mode_sense_big(struct scsipi_periph *periph, int byte2, int page,
1210     struct scsi_mode_parameter_header_10 *data, int len, int flags, int retries,
1211     int timeout)
1212 {
1213 	struct scsi_mode_sense_10 cmd;
1214 
1215 	memset(&cmd, 0, sizeof(cmd));
1216 	cmd.opcode = SCSI_MODE_SENSE_10;
1217 	cmd.byte2 = byte2;
1218 	cmd.page = page;
1219 	_lto2b(len, cmd.length);
1220 
1221 	return (scsipi_command(periph, (void *)&cmd, sizeof(cmd),
1222 	    (void *)data, len, retries, timeout, NULL, flags | XS_CTL_DATA_IN));
1223 }
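
/*
 * Note on the length encodings (illustrative): the 10-byte CDBs carry a
 * two-byte big-endian length, so _lto2b(0x1234, cmd.length) stores
 * cmd.length[0] = 0x12 and cmd.length[1] = 0x34, while the 6-byte CDBs
 * truncate the length to a single byte with (len & 0xff).
 */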
1224 
1225 int
1226 scsipi_mode_select(struct scsipi_periph *periph, int byte2,
1227     struct scsi_mode_parameter_header_6 *data, int len, int flags, int retries,
1228     int timeout)
1229 {
1230 	struct scsi_mode_select_6 cmd;
1231 
1232 	memset(&cmd, 0, sizeof(cmd));
1233 	cmd.opcode = SCSI_MODE_SELECT_6;
1234 	cmd.byte2 = byte2;
1235 	cmd.length = len & 0xff;
1236 
1237 	return (scsipi_command(periph, (void *)&cmd, sizeof(cmd),
1238 	    (void *)data, len, retries, timeout, NULL, flags | XS_CTL_DATA_OUT));
1239 }
1240 
1241 int
1242 scsipi_mode_select_big(struct scsipi_periph *periph, int byte2,
1243     struct scsi_mode_parameter_header_10 *data, int len, int flags, int retries,
1244     int timeout)
1245 {
1246 	struct scsi_mode_select_10 cmd;
1247 
1248 	memset(&cmd, 0, sizeof(cmd));
1249 	cmd.opcode = SCSI_MODE_SELECT_10;
1250 	cmd.byte2 = byte2;
1251 	_lto2b(len, cmd.length);
1252 
1253 	return (scsipi_command(periph, (void *)&cmd, sizeof(cmd),
1254 	    (void *)data, len, retries, timeout, NULL, flags | XS_CTL_DATA_OUT));
1255 }
1256 
1257 /*
1258  * scsipi_done:
1259  *
1260  *	This routine is called by an adapter's interrupt handler when
1261  *	an xfer is completed.
1262  */
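
/*
 * Sketch of the adapter side (hypothetical HBA interrupt handler
 * finishing a transfer without error):
 *
 *	xs->resid = 0;			(all data was transferred)
 *	xs->error = XS_NOERROR;
 *	scsipi_done(xs);		(may be called from interrupt context)
 */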
1263 void
1264 scsipi_done(struct scsipi_xfer *xs)
1265 {
1266 	struct scsipi_periph *periph = xs->xs_periph;
1267 	struct scsipi_channel *chan = periph->periph_channel;
1268 	int s, freezecnt;
1269 
1270 	SC_DEBUG(periph, SCSIPI_DB2, ("scsipi_done\n"));
1271 #ifdef SCSIPI_DEBUG
1272 	if (periph->periph_dbflags & SCSIPI_DB1)
1273 		show_scsipi_cmd(xs);
1274 #endif
1275 
1276 	s = splbio();
1277 	/*
1278 	 * The resource this command was using is now free.
1279 	 */
1280 	if (xs->xs_status & XS_STS_DONE) {
1281 		/* XXX in certain circumstances, such as a device
1282 		 * being detached, an xs that has already been
1283 		 * scsipi_done()'d by the main thread will be done'd
1284 		 * again by scsibusdetach(). Putting the xs on the
1285 		 * chan_complete queue causes list corruption and
1286 		 * everyone dies. This prevents that, but perhaps
1287 		 * there should be better coordination somewhere such
1288 		 * that this won't ever happen (and can be turned into
1289 		 * a KASSERT()).
1290 		 */
1291 		splx(s);
1292 		goto out;
1293 	}
1294 	scsipi_put_resource(chan);
1295 	xs->xs_periph->periph_sent--;
1296 
1297 	/*
1298 	 * If the command was tagged, free the tag.
1299 	 */
1300 	if (XS_CTL_TAGTYPE(xs) != 0)
1301 		scsipi_put_tag(xs);
1302 	else
1303 		periph->periph_flags &= ~PERIPH_UNTAG;
1304 
1305 	/* Mark the command as `done'. */
1306 	xs->xs_status |= XS_STS_DONE;
1307 
1308 #ifdef DIAGNOSTIC
1309 	if ((xs->xs_control & (XS_CTL_ASYNC|XS_CTL_POLL)) ==
1310 	    (XS_CTL_ASYNC|XS_CTL_POLL))
1311 		panic("scsipi_done: ASYNC and POLL");
1312 #endif
1313 
1314 	/*
1315 	 * If the xfer had an error of any sort, freeze the
1316 	 * periph's queue.  Freeze it again if we were requested
1317 	 * to do so in the xfer.
1318 	 */
1319 	freezecnt = 0;
1320 	if (xs->error != XS_NOERROR)
1321 		freezecnt++;
1322 	if (xs->xs_control & XS_CTL_FREEZE_PERIPH)
1323 		freezecnt++;
1324 	if (freezecnt != 0)
1325 		scsipi_periph_freeze(periph, freezecnt);
1326 
1327 	/*
1328 	 * Record the xfer with a pending sense, in case a SCSI reset is
1329 	 * received before the thread is woken up.
1330 	 */
1331 	if (xs->error == XS_BUSY && xs->status == SCSI_CHECK) {
1332 		periph->periph_flags |= PERIPH_SENSE;
1333 		periph->periph_xscheck = xs;
1334 	}
1335 
1336 	/*
1337 	 * If this was an xfer that was not to complete asynchronously,
1338 	 * let the requesting thread perform error checking/handling
1339 	 * in its context.
1340 	 */
1341 	if ((xs->xs_control & XS_CTL_ASYNC) == 0) {
1342 		splx(s);
1343 		/*
1344 		 * If it's a polling job, just return, to unwind the
1345 		 * call graph.  We don't need to restart the queue,
1346 		 * because polling jobs are treated specially, and
1347 		 * are really only used during crash dumps anyway
1348 		 * (XXX or during boot-time autoconfiguration of
1349 		 * ATAPI devices).
1350 		 */
1351 		if (xs->xs_control & XS_CTL_POLL)
1352 			return;
1353 		wakeup(xs);
1354 		goto out;
1355 	}
1356 
1357 	/*
1358 	 * Catch the extremely common case of I/O completing
1359 	 * without error; no use in taking a context switch
1360 	 * if we can handle it in interrupt context.
1361 	 */
1362 	if (xs->error == XS_NOERROR) {
1363 		splx(s);
1364 		(void) scsipi_complete(xs);
1365 		goto out;
1366 	}
1367 
1368 	/*
1369 	 * There is an error on this xfer.  Put it on the channel's
1370 	 * completion queue, and wake up the completion thread.
1371 	 */
1372 	TAILQ_INSERT_TAIL(&chan->chan_complete, xs, channel_q);
1373 	splx(s);
1374 	wakeup(&chan->chan_complete);
1375 
1376  out:
1377 	/*
1378 	 * If there are more xfers on the channel's queue, attempt to
1379 	 * run them.
1380 	 */
1381 	scsipi_run_queue(chan);
1382 }
1383 
1384 /*
1385  * scsipi_complete:
1386  *
1387  *	Completion of a scsipi_xfer.  This is the guts of scsipi_done().
1388  *
1389  *	NOTE: This routine MUST be called with valid thread context
1390  *	except for the case where the following two conditions are
1391  *	true:
1392  *
1393  *		xs->error == XS_NOERROR
1394  *		XS_CTL_ASYNC is set in xs->xs_control
1395  *
1396  *	The semantics of this routine can be tricky, so here is an
1397  *	explanation:
1398  *
1399  *		0		Xfer completed successfully.
1400  *
1401  *		ERESTART	Xfer had an error, but was restarted.
1402  *
1403  *		anything else	Xfer had an error, return value is Unix
1404  *				errno.
1405  *
1406  *	If the return value is anything but ERESTART:
1407  *
1408  *		- If XS_CTL_ASYNC is set, `xs' has been freed back to
1409  *		  the pool.
1410  *		- If there is a buf associated with the xfer,
1411  *		  it has been biodone()'d.
1412  */
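
/*
 * Caller-side handling of the ERESTART convention (see
 * scsipi_execute_xs() below for the real instance) reduces to:
 *
 *	error = scsipi_complete(xs);
 *	if (error == ERESTART)
 *		goto restarted;		(xfer re-enqueued; run the queue again)
 */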
1413 static int
1414 scsipi_complete(struct scsipi_xfer *xs)
1415 {
1416 	struct scsipi_periph *periph = xs->xs_periph;
1417 	struct scsipi_channel *chan = periph->periph_channel;
1418 	int error, s;
1419 
1420 #ifdef DIAGNOSTIC
1421 	if ((xs->xs_control & XS_CTL_ASYNC) != 0 && xs->bp == NULL)
1422 		panic("scsipi_complete: XS_CTL_ASYNC but no buf");
1423 #endif
1424 	/*
1425 	 * If command terminated with a CHECK CONDITION, we need to issue a
1426 	 * REQUEST_SENSE command. Once the REQUEST_SENSE has been processed
1427 	 * we'll have the real status.
1428 	 * Must be processed at splbio() to avoid missing a SCSI bus reset
1429 	 * for this command.
1430 	 */
1431 	s = splbio();
1432 	if (xs->error == XS_BUSY && xs->status == SCSI_CHECK) {
1433 		/* request sense for a request sense? */
1434 		if (xs->xs_control & XS_CTL_REQSENSE) {
1435 			scsipi_printaddr(periph);
1436 			printf("request sense for a request sense?\n");
1437 			/* XXX maybe we should reset the device? */
1438 			/* we've been frozen because xs->error != XS_NOERROR */
1439 			scsipi_periph_thaw(periph, 1);
1440 			splx(s);
1441 			if (xs->resid < xs->datalen) {
1442 				printf("we read %d bytes of sense anyway:\n",
1443 				    xs->datalen - xs->resid);
1444 #ifdef SCSIVERBOSE
1445 				scsipi_print_sense_data((void *)xs->data, 0);
1446 #endif
1447 			}
1448 			return EINVAL;
1449 		}
1450 		scsipi_request_sense(xs);
1451 	}
1452 	splx(s);
1453 
1454 	/*
1455 	 * If it's a user level request, bypass all usual completion
1456 	 * processing and let the user work it out.
1457 	 */
1458 	if ((xs->xs_control & XS_CTL_USERCMD) != 0) {
1459 		SC_DEBUG(periph, SCSIPI_DB3, ("calling user done()\n"));
1460 		if (xs->error != XS_NOERROR)
1461 			scsipi_periph_thaw(periph, 1);
1462 		scsipi_user_done(xs);
1463 		SC_DEBUG(periph, SCSIPI_DB3, ("returned from user done()\n "));
1464 		return 0;
1465 	}
1466 
1467 	switch (xs->error) {
1468 	case XS_NOERROR:
1469 		error = 0;
1470 		break;
1471 
1472 	case XS_SENSE:
1473 	case XS_SHORTSENSE:
1474 		error = (*chan->chan_bustype->bustype_interpret_sense)(xs);
1475 		break;
1476 
1477 	case XS_RESOURCE_SHORTAGE:
1478 		/*
1479 		 * XXX Should freeze channel's queue.
1480 		 */
1481 		scsipi_printaddr(periph);
1482 		printf("adapter resource shortage\n");
1483 		/* FALLTHROUGH */
1484 
1485 	case XS_BUSY:
1486 		if (xs->error == XS_BUSY && xs->status == SCSI_QUEUE_FULL) {
1487 			struct scsipi_max_openings mo;
1488 
1489 			/*
1490 			 * We set the openings to active - 1, assuming that
1491 			 * the command that got us here is the first one that
1492 			 * can't fit into the device's queue.  If that's not
1493 			 * the case, I guess we'll find out soon enough.
1494 			 */
1495 			mo.mo_target = periph->periph_target;
1496 			mo.mo_lun = periph->periph_lun;
1497 			if (periph->periph_active < periph->periph_openings)
1498 				mo.mo_openings = periph->periph_active - 1;
1499 			else
1500 				mo.mo_openings = periph->periph_openings - 1;
1501 #ifdef DIAGNOSTIC
1502 			if (mo.mo_openings < 0) {
1503 				scsipi_printaddr(periph);
1504 				printf("QUEUE FULL resulted in < 0 openings\n");
1505 				panic("scsipi_done");
1506 			}
1507 #endif
1508 			if (mo.mo_openings == 0) {
1509 				scsipi_printaddr(periph);
1510 				printf("QUEUE FULL resulted in 0 openings\n");
1511 				mo.mo_openings = 1;
1512 			}
1513 			scsipi_async_event(chan, ASYNC_EVENT_MAX_OPENINGS, &mo);
1514 			error = ERESTART;
1515 		} else if (xs->xs_retries != 0) {
1516 			xs->xs_retries--;
1517 			/*
1518 			 * Wait one second, and try again.
1519 			 */
1520 			if ((xs->xs_control & XS_CTL_POLL) ||
1521 			    (chan->chan_flags & SCSIPI_CHAN_TACTIVE) == 0) {
1522 				delay(1000000);
1523 			} else if (!callout_pending(&periph->periph_callout)) {
1524 				scsipi_periph_freeze(periph, 1);
1525 				callout_reset(&periph->periph_callout,
1526 				    hz, scsipi_periph_timed_thaw, periph);
1527 			}
1528 			error = ERESTART;
1529 		} else
1530 			error = EBUSY;
1531 		break;
1532 
1533 	case XS_REQUEUE:
1534 		error = ERESTART;
1535 		break;
1536 
1537 	case XS_SELTIMEOUT:
1538 	case XS_TIMEOUT:
1539 		/*
1540 		 * If the device hasn't gone away, honor retry counts.
1541 		 *
1542 		 * Note that if we're in the middle of probing it,
1543 		 * it won't be found because it isn't here yet, so
1544 		 * we won't honor the retry count in that case.
1545 		 */
1546 		if (scsipi_lookup_periph(chan, periph->periph_target,
1547 		    periph->periph_lun) && xs->xs_retries != 0) {
1548 			xs->xs_retries--;
1549 			error = ERESTART;
1550 		} else
1551 			error = EIO;
1552 		break;
1553 
1554 	case XS_RESET:
1555 		if (xs->xs_control & XS_CTL_REQSENSE) {
1556 			/*
1557 			 * request sense interrupted by reset: signal it
1558 			 * with EINTR return code.
1559 			 */
1560 			error = EINTR;
1561 		} else {
1562 			if (xs->xs_retries != 0) {
1563 				xs->xs_retries--;
1564 				error = ERESTART;
1565 			} else
1566 				error = EIO;
1567 		}
1568 		break;
1569 
1570 	case XS_DRIVER_STUFFUP:
1571 		scsipi_printaddr(periph);
1572 		printf("generic HBA error\n");
1573 		error = EIO;
1574 		break;
1575 	default:
1576 		scsipi_printaddr(periph);
1577 		printf("invalid return code from adapter: %d\n", xs->error);
1578 		error = EIO;
1579 		break;
1580 	}
1581 
1582 	s = splbio();
1583 	if (error == ERESTART) {
1584 		/*
1585 		 * If we get here, the periph has been thawed and frozen
1586 		 * again if we had to issue recovery commands.  Alternatively,
1587 		 * it may have been frozen again and in a timed thaw.  In
1588 		 * any case, we thaw the periph once we re-enqueue the
1589 		 * command.  Once the periph is fully thawed, it will begin
1590 		 * operation again.
1591 		 */
1592 		xs->error = XS_NOERROR;
1593 		xs->status = SCSI_OK;
1594 		xs->xs_status &= ~XS_STS_DONE;
1595 		xs->xs_requeuecnt++;
1596 		error = scsipi_enqueue(xs);
1597 		if (error == 0) {
1598 			scsipi_periph_thaw(periph, 1);
1599 			splx(s);
1600 			return (ERESTART);
1601 		}
1602 	}
1603 
1604 	/*
1605 	 * scsipi_done() freezes the queue if not XS_NOERROR.
1606 	 * Thaw it here.
1607 	 */
1608 	if (xs->error != XS_NOERROR)
1609 		scsipi_periph_thaw(periph, 1);
1610 
1611 	if (periph->periph_switch->psw_done)
1612 		periph->periph_switch->psw_done(xs, error);
1613 
1614 	if (xs->xs_control & XS_CTL_ASYNC)
1615 		scsipi_put_xs(xs);
1616 	splx(s);
1617 
1618 	return (error);
1619 }
1620 
1621 /*
1622  * Issue a request sense for the given scsipi_xfer. Called when the xfer
1623  * returns with a CHECK_CONDITION status. Must be called in valid thread
1624  * context and at splbio().
1625  */
1626 
1627 static void
1628 scsipi_request_sense(struct scsipi_xfer *xs)
1629 {
1630 	struct scsipi_periph *periph = xs->xs_periph;
1631 	int flags, error;
1632 	struct scsi_request_sense cmd;
1633 
1634 	periph->periph_flags |= PERIPH_SENSE;
1635 
1636 	/* if command was polling, request sense will too */
1637 	flags = xs->xs_control & XS_CTL_POLL;
1638 	/* Polling commands can't sleep */
1639 	if (flags)
1640 		flags |= XS_CTL_NOSLEEP;
1641 
1642 	flags |= XS_CTL_REQSENSE | XS_CTL_URGENT | XS_CTL_DATA_IN |
1643 	    XS_CTL_THAW_PERIPH | XS_CTL_FREEZE_PERIPH;
1644 
1645 	memset(&cmd, 0, sizeof(cmd));
1646 	cmd.opcode = SCSI_REQUEST_SENSE;
1647 	cmd.length = sizeof(struct scsi_sense_data);
1648 
1649 	error = scsipi_command(periph, (void *)&cmd, sizeof(cmd),
1650 	    (void *)&xs->sense.scsi_sense, sizeof(struct scsi_sense_data),
1651 	    0, 1000, NULL, flags);
1652 	periph->periph_flags &= ~PERIPH_SENSE;
1653 	periph->periph_xscheck = NULL;
1654 	switch (error) {
1655 	case 0:
1656 		/* we have a valid sense */
1657 		xs->error = XS_SENSE;
1658 		return;
1659 	case EINTR:
1660 		/* REQUEST_SENSE interrupted by bus reset. */
1661 		xs->error = XS_RESET;
1662 		return;
1663 	case EIO:
1664 		/* request sense couldn't be performed */
1665 		/*
1666 		 * XXX this isn't quite right but we don't have anything
1667 		 * better for now
1668 		 */
1669 		xs->error = XS_DRIVER_STUFFUP;
1670 		return;
1671 	default:
1672 		/* Notify that request sense failed. */
1673 		xs->error = XS_DRIVER_STUFFUP;
1674 		scsipi_printaddr(periph);
1675 		printf("request sense failed with error %d\n", error);
1676 		return;
1677 	}
1678 }
1679 
1680 /*
1681  * scsipi_enqueue:
1682  *
1683  *	Enqueue an xfer on a channel.
1684  */
1685 static int
1686 scsipi_enqueue(struct scsipi_xfer *xs)
1687 {
1688 	struct scsipi_channel *chan = xs->xs_periph->periph_channel;
1689 	struct scsipi_xfer *qxs;
1690 	int s;
1691 
1692 	s = splbio();
1693 
1694 	/*
1695 	 * If the xfer is to be polled, and there are already jobs on
1696 	 * the queue, we can't proceed.
1697 	 */
1698 	if ((xs->xs_control & XS_CTL_POLL) != 0 &&
1699 	    TAILQ_FIRST(&chan->chan_queue) != NULL) {
1700 		splx(s);
1701 		xs->error = XS_DRIVER_STUFFUP;
1702 		return (EAGAIN);
1703 	}
1704 
1705 	/*
1706 	 * If we have an URGENT xfer, it's an error recovery command
1707 	 * and it should just go on the head of the channel's queue.
1708 	 */
1709 	if (xs->xs_control & XS_CTL_URGENT) {
1710 		TAILQ_INSERT_HEAD(&chan->chan_queue, xs, channel_q);
1711 		goto out;
1712 	}
1713 
1714 	/*
1715 	 * If this xfer has already been on the queue before, we
1716 	 * need to reinsert it in the correct order.  That order is:
1717 	 *
1718 	 *	Immediately before the first xfer for this periph
1719 	 *	with a requeuecnt less than xs->xs_requeuecnt.
1720 	 *
1721 	 * Failing that, at the end of the queue.  (We'll end up
1722 	 * there naturally.)
1723 	 */
1724 	if (xs->xs_requeuecnt != 0) {
1725 		for (qxs = TAILQ_FIRST(&chan->chan_queue); qxs != NULL;
1726 		     qxs = TAILQ_NEXT(qxs, channel_q)) {
1727 			if (qxs->xs_periph == xs->xs_periph &&
1728 			    qxs->xs_requeuecnt < xs->xs_requeuecnt)
1729 				break;
1730 		}
1731 		if (qxs != NULL) {
1732 			TAILQ_INSERT_AFTER(&chan->chan_queue, qxs, xs,
1733 			    channel_q);
1734 			goto out;
1735 		}
1736 	}
1737 	TAILQ_INSERT_TAIL(&chan->chan_queue, xs, channel_q);
1738  out:
1739 	if (xs->xs_control & XS_CTL_THAW_PERIPH)
1740 		scsipi_periph_thaw(xs->xs_periph, 1);
1741 	splx(s);
1742 	return (0);
1743 }
1744 
1745 /*
1746  * scsipi_run_queue:
1747  *
1748  *	Start as many xfers as possible running on the channel.
1749  */
1750 static void
1751 scsipi_run_queue(struct scsipi_channel *chan)
1752 {
1753 	struct scsipi_xfer *xs;
1754 	struct scsipi_periph *periph;
1755 	int s;
1756 
1757 	for (;;) {
1758 		s = splbio();
1759 
1760 		/*
1761 		 * If the channel is frozen, we can't do any work right
1762 		 * now.
1763 		 */
1764 		if (chan->chan_qfreeze != 0) {
1765 			splx(s);
1766 			return;
1767 		}
1768 
1769 		/*
1770 		 * Look for work to do, and make sure we can do it.
1771 		 */
1772 		for (xs = TAILQ_FIRST(&chan->chan_queue); xs != NULL;
1773 		     xs = TAILQ_NEXT(xs, channel_q)) {
1774 			periph = xs->xs_periph;
1775 
1776 			if ((periph->periph_sent >= periph->periph_openings) ||
1777 			    periph->periph_qfreeze != 0 ||
1778 			    (periph->periph_flags & PERIPH_UNTAG) != 0)
1779 				continue;
1780 
1781 			if ((periph->periph_flags &
1782 			    (PERIPH_RECOVERING | PERIPH_SENSE)) != 0 &&
1783 			    (xs->xs_control & XS_CTL_URGENT) == 0)
1784 				continue;
1785 
1786 			/*
1787 			 * We can issue this xfer!
1788 			 */
1789 			goto got_one;
1790 		}
1791 
1792 		/*
1793 		 * Can't find any work to do right now.
1794 		 */
1795 		splx(s);
1796 		return;
1797 
1798  got_one:
1799 		/*
1800 		 * Have an xfer to run.  Allocate a resource from
1801 		 * the adapter to run it.  If we can't allocate that
1802 		 * resource, we don't dequeue the xfer.
1803 		 */
1804 		if (scsipi_get_resource(chan) == 0) {
1805 			/*
1806 			 * Adapter is out of resources.  If the adapter
1807 			 * supports it, attempt to grow them.
1808 			 */
1809 			if (scsipi_grow_resources(chan) == 0) {
1810 				/*
1811 				 * Wasn't able to grow resources,
1812 				 * nothing more we can do.
1813 				 */
1814 				if (xs->xs_control & XS_CTL_POLL) {
1815 					scsipi_printaddr(xs->xs_periph);
1816 					printf("polling command but no "
1817 					    "adapter resources");
1818 					/* We'll panic shortly... */
1819 				}
1820 				splx(s);
1821 
1822 				/*
1823 				 * XXX: We should be able to note that
1824 				 * XXX: that resources are needed here!
1825 				 */
1826 				return;
1827 			}
1828 			/*
1829 			 * scsipi_grow_resources() allocated the resource
1830 			 * for us.
1831 			 */
1832 		}
1833 
1834 		/*
1835 		 * We have a resource to run this xfer, do it!
1836 		 */
1837 		TAILQ_REMOVE(&chan->chan_queue, xs, channel_q);
1838 
1839 		/*
1840 		 * If the command is to be tagged, allocate a tag ID
1841 		 * for it.
1842 		 */
1843 		if (XS_CTL_TAGTYPE(xs) != 0)
1844 			scsipi_get_tag(xs);
1845 		else
1846 			periph->periph_flags |= PERIPH_UNTAG;
1847 		periph->periph_sent++;
1848 		splx(s);
1849 
1850 		scsipi_adapter_request(chan, ADAPTER_REQ_RUN_XFER, xs);
1851 	}
1852 #ifdef DIAGNOSTIC
1853 	panic("scsipi_run_queue: impossible");
1854 #endif
1855 }
1856 
1857 /*
1858  * scsipi_execute_xs:
1859  *
1860  *	Begin execution of an xfer, waiting for it to complete, if necessary.
1861  */
1862 int
1863 scsipi_execute_xs(struct scsipi_xfer *xs)
1864 {
1865 	struct scsipi_periph *periph = xs->xs_periph;
1866 	struct scsipi_channel *chan = periph->periph_channel;
1867 	int oasync, async, poll, error, s;
1868 
1869 	KASSERT(!cold);
1870 
1871 	(chan->chan_bustype->bustype_cmd)(xs);
1872 
1873 	xs->xs_status &= ~XS_STS_DONE;
1874 	xs->error = XS_NOERROR;
1875 	xs->resid = xs->datalen;
1876 	xs->status = SCSI_OK;
1877 
1878 #ifdef SCSIPI_DEBUG
1879 	if (xs->xs_periph->periph_dbflags & SCSIPI_DB3) {
1880 		printf("scsipi_execute_xs: ");
1881 		show_scsipi_xs(xs);
1882 		printf("\n");
1883 	}
1884 #endif
1885 
1886 	/*
1887 	 * Deal with command tagging:
1888 	 *
1889 	 *	- If the device's current operating mode doesn't
1890 	 *	  include tagged queueing, clear the tag mask.
1891 	 *
1892 	 *	- If the device's current operating mode *does*
1893 	 *	  include tagged queueing, set the tag_type in
1894 	 *	  the xfer to the appropriate byte for the tag
1895 	 *	  message.
1896 	 */
1897 	if ((PERIPH_XFER_MODE(periph) & PERIPH_CAP_TQING) == 0 ||
1898 		(xs->xs_control & XS_CTL_REQSENSE)) {
1899 		xs->xs_control &= ~XS_CTL_TAGMASK;
1900 		xs->xs_tag_type = 0;
1901 	} else {
1902 		/*
1903 		 * If the request doesn't specify a tag, give Head
1904 		 * tags to URGENT operations and Ordered tags to
1905 		 * everything else.
1906 		 */
1907 		if (XS_CTL_TAGTYPE(xs) == 0) {
1908 			if (xs->xs_control & XS_CTL_URGENT)
1909 				xs->xs_control |= XS_CTL_HEAD_TAG;
1910 			else
1911 				xs->xs_control |= XS_CTL_ORDERED_TAG;
1912 		}
1913 
1914 		switch (XS_CTL_TAGTYPE(xs)) {
1915 		case XS_CTL_ORDERED_TAG:
1916 			xs->xs_tag_type = MSG_ORDERED_Q_TAG;
1917 			break;
1918 
1919 		case XS_CTL_SIMPLE_TAG:
1920 			xs->xs_tag_type = MSG_SIMPLE_Q_TAG;
1921 			break;
1922 
1923 		case XS_CTL_HEAD_TAG:
1924 			xs->xs_tag_type = MSG_HEAD_OF_Q_TAG;
1925 			break;
1926 
1927 		default:
1928 			scsipi_printaddr(periph);
1929 			printf("invalid tag mask 0x%08x\n",
1930 			    XS_CTL_TAGTYPE(xs));
1931 			panic("scsipi_execute_xs");
1932 		}
1933 	}
1934 
1935 	/* If the adapter wants us to poll, poll. */
1936 	if (chan->chan_adapter->adapt_flags & SCSIPI_ADAPT_POLL_ONLY)
1937 		xs->xs_control |= XS_CTL_POLL;
1938 
1939 	/*
1940 	 * If we don't yet have a completion thread, or we are to poll for
1941 	 * completion, clear the ASYNC flag.
1942 	 */
1943 	oasync = (xs->xs_control & XS_CTL_ASYNC);
1944 	if (chan->chan_thread == NULL || (xs->xs_control & XS_CTL_POLL) != 0)
1945 		xs->xs_control &= ~XS_CTL_ASYNC;
1946 
1947 	async = (xs->xs_control & XS_CTL_ASYNC);
1948 	poll = (xs->xs_control & XS_CTL_POLL);
1949 
1950 #ifdef DIAGNOSTIC
1951 	if (oasync != 0 && xs->bp == NULL)
1952 		panic("scsipi_execute_xs: XS_CTL_ASYNC but no buf");
1953 #endif
1954 
1955 	/*
1956 	 * Enqueue the transfer.  If we're not polling for completion, this
1957 	 * should ALWAYS return `no error'.
1958 	 */
1959 	error = scsipi_enqueue(xs);
1960 	if (error) {
1961 		if (poll == 0) {
1962 			scsipi_printaddr(periph);
1963 			printf("not polling, but enqueue failed with %d\n",
1964 			    error);
1965 			panic("scsipi_execute_xs");
1966 		}
1967 
1968 		scsipi_printaddr(periph);
1969 		printf("should have flushed queue?\n");
1970 		goto free_xs;
1971 	}
1972 
1973  restarted:
1974 	scsipi_run_queue(chan);
1975 
1976 	/*
1977 	 * The xfer is enqueued, and possibly running.  If it's to be
1978 	 * completed asynchronously, just return now.
1979 	 */
1980 	if (async)
1981 		return (0);
1982 
1983 	/*
1984 	 * Not an asynchronous command; wait for it to complete.
1985 	 */
1986 	s = splbio();
1987 	while ((xs->xs_status & XS_STS_DONE) == 0) {
1988 		if (poll) {
1989 			scsipi_printaddr(periph);
1990 			printf("polling command not done\n");
1991 			panic("scsipi_execute_xs");
1992 		}
1993 		(void) tsleep(xs, PRIBIO, "xscmd", 0);
1994 	}
1995 	splx(s);
1996 
1997 	/*
1998 	 * Command is complete.  scsipi_done() has awakened us to perform
1999 	 * the error handling.
2000 	 */
2001 	error = scsipi_complete(xs);
2002 	if (error == ERESTART)
2003 		goto restarted;
2004 
2005 	/*
2006 	 * If it was meant to run async and we cleared async ourselves,
2007 	 * don't return an error here; it has already been handled.
2008 	 */
2009 	if (oasync)
2010 		error = 0;
2011 	/*
2012 	 * Command completed successfully or fatal error occurred.  Fall
2013 	 * into....
2014 	 */
2015  free_xs:
2016 	s = splbio();
2017 	scsipi_put_xs(xs);
2018 	splx(s);
2019 
2020 	/*
2021 	 * Kick the queue, keep it running in case it stopped for some
2022 	 * reason.
2023 	 */
2024 	scsipi_run_queue(chan);
2025 
2026 	return (error);
2027 }
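
/*
 * Illustrative sketch (not part of this file): periph drivers do not
 * normally call scsipi_execute_xs() directly; they go through
 * scsipi_command(), which allocates the xfer, copies in the cdb and
 * ends up here.  A minimal synchronous TEST UNIT READY might look
 * like this; the function name is hypothetical.
 */
#if 0
static int
example_tur(struct scsipi_periph *periph)
{
	struct scsi_test_unit_ready cmd;

	memset(&cmd, 0, sizeof(cmd));
	cmd.opcode = SCSI_TEST_UNIT_READY;

	/* No XS_CTL_ASYNC: scsipi_execute_xs() sleeps until done. */
	return scsipi_command(periph, (void *)&cmd, sizeof(cmd),
	    NULL, 0, SCSIPIRETRIES, 10000, NULL, 0);
}
#endif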
2028 
2029 /*
2030  * scsipi_completion_thread:
2031  *
2032  *	This is the completion thread.  We wait for errors on
2033  *	asynchronous xfers, and perform the error handling
2034  *	function, restarting the command, if necessary.
2035  */
2036 static void
2037 scsipi_completion_thread(void *arg)
2038 {
2039 	struct scsipi_channel *chan = arg;
2040 	struct scsipi_xfer *xs;
2041 	int s;
2042 
2043 	if (chan->chan_init_cb)
2044 		(*chan->chan_init_cb)(chan, chan->chan_init_cb_arg);
2045 
2046 	s = splbio();
2047 	chan->chan_flags |= SCSIPI_CHAN_TACTIVE;
2048 	splx(s);
2049 	for (;;) {
2050 		s = splbio();
2051 		xs = TAILQ_FIRST(&chan->chan_complete);
2052 		if (xs == NULL && chan->chan_tflags == 0) {
2053 			/* nothing to do; wait */
2054 			(void) tsleep(&chan->chan_complete, PRIBIO,
2055 			    "sccomp", 0);
2056 			splx(s);
2057 			continue;
2058 		}
2059 		if (chan->chan_tflags & SCSIPI_CHANT_CALLBACK) {
2060 			/* call chan_callback from thread context */
2061 			chan->chan_tflags &= ~SCSIPI_CHANT_CALLBACK;
2062 			chan->chan_callback(chan, chan->chan_callback_arg);
2063 			splx(s);
2064 			continue;
2065 		}
2066 		if (chan->chan_tflags & SCSIPI_CHANT_GROWRES) {
2067 			/* attempt to get more openings for this channel */
2068 			chan->chan_tflags &= ~SCSIPI_CHANT_GROWRES;
2069 			scsipi_adapter_request(chan,
2070 			    ADAPTER_REQ_GROW_RESOURCES, NULL);
2071 			scsipi_channel_thaw(chan, 1);
2072 			splx(s);
2073 			if (chan->chan_tflags & SCSIPI_CHANT_GROWRES)
2074 				kpause("scsizzz", FALSE, hz/10, NULL);
2075 			continue;
2076 		}
2077 		if (chan->chan_tflags & SCSIPI_CHANT_KICK) {
2078 			/* explicitly run the queues for this channel */
2079 			chan->chan_tflags &= ~SCSIPI_CHANT_KICK;
2080 			scsipi_run_queue(chan);
2081 			splx(s);
2082 			continue;
2083 		}
2084 		if (chan->chan_tflags & SCSIPI_CHANT_SHUTDOWN) {
2085 			splx(s);
2086 			break;
2087 		}
2088 		if (xs) {
2089 			TAILQ_REMOVE(&chan->chan_complete, xs, channel_q);
2090 			splx(s);
2091 
2092 			/*
2093 			 * Have an xfer with an error; process it.
2094 			 */
2095 			(void) scsipi_complete(xs);
2096 
2097 			/*
2098 			 * Kick the queue; keep it running if it was stopped
2099 			 * for some reason.
2100 			 */
2101 			scsipi_run_queue(chan);
2102 		} else {
2103 			splx(s);
2104 		}
2105 	}
2106 
2107 	chan->chan_thread = NULL;
2108 
2109 	/* In case parent is waiting for us to exit. */
2110 	wakeup(&chan->chan_thread);
2111 
2112 	kthread_exit(0);
2113 }
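
/*
 * Illustrative sketch (not part of this file): the completion thread
 * above is started once per channel during attachment with a
 * kthread_create(9)-style call; the exact flags and thread name used
 * here are assumptions.
 */
#if 0
	if (kthread_create(PRI_NONE, 0, NULL, scsipi_completion_thread,
	    chan, &chan->chan_thread, "%s", "scsibus") != 0)
		aprint_error("unable to create completion thread\n");
#endif
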
2114 /*
2115  * scsipi_thread_call_callback:
2116  *
2117  *	Request that a callback be invoked from the completion thread.
2118  */
2119 int
2120 scsipi_thread_call_callback(struct scsipi_channel *chan,
2121     void (*callback)(struct scsipi_channel *, void *), void *arg)
2122 {
2123 	int s;
2124 
2125 	s = splbio();
2126 	if ((chan->chan_flags & SCSIPI_CHAN_TACTIVE) == 0) {
2127 		/* kernel thread doesn't exist yet */
2128 		splx(s);
2129 		return ESRCH;
2130 	}
2131 	if (chan->chan_tflags & SCSIPI_CHANT_CALLBACK) {
2132 		splx(s);
2133 		return EBUSY;
2134 	}
2135 	scsipi_channel_freeze(chan, 1);
2136 	chan->chan_callback = callback;
2137 	chan->chan_callback_arg = arg;
2138 	chan->chan_tflags |= SCSIPI_CHANT_CALLBACK;
2139 	wakeup(&chan->chan_complete);
2140 	splx(s);
2141 	return (0);
2142 }
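
/*
 * Illustrative sketch (not part of this file): code running at
 * interrupt level can defer sleepable work to the completion thread.
 * Note that scsipi_thread_call_callback() freezes the channel by one,
 * so the callback is expected to thaw it.  The names below are
 * hypothetical.
 */
#if 0
static void
example_cb(struct scsipi_channel *chan, void *arg)
{

	/* Runs in thread context; may sleep. */
	scsipi_channel_thaw(chan, 1);
}

static int
example_defer(struct scsipi_channel *chan)
{

	/* Returns ESRCH if the thread isn't up, EBUSY if one is queued. */
	return scsipi_thread_call_callback(chan, example_cb, NULL);
}
#endif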
2143 
2144 /*
2145  * scsipi_async_event:
2146  *
2147  *	Handle an asynchronous event from an adapter.
2148  */
2149 void
2150 scsipi_async_event(struct scsipi_channel *chan, scsipi_async_event_t event,
2151     void *arg)
2152 {
2153 	int s;
2154 
2155 	s = splbio();
2156 	switch (event) {
2157 	case ASYNC_EVENT_MAX_OPENINGS:
2158 		scsipi_async_event_max_openings(chan,
2159 		    (struct scsipi_max_openings *)arg);
2160 		break;
2161 
2162 	case ASYNC_EVENT_XFER_MODE:
2163 		scsipi_async_event_xfer_mode(chan,
2164 		    (struct scsipi_xfer_mode *)arg);
2165 		break;
2166 	case ASYNC_EVENT_RESET:
2167 		scsipi_async_event_channel_reset(chan);
2168 		break;
2169 	}
2170 	splx(s);
2171 }
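
/*
 * Illustrative sketch (not part of this file): an adapter reporting a
 * freshly negotiated transfer mode.  Note that xm_period carries the
 * sync *factor*, not nanoseconds; the values here are examples only.
 */
#if 0
	struct scsipi_xfer_mode xm;

	xm.xm_target = 3;
	xm.xm_mode = PERIPH_CAP_SYNC | PERIPH_CAP_WIDE16 | PERIPH_CAP_TQING;
	xm.xm_period = 0x0a;	/* FAST-40: 25 ns, see scsipi_syncparams[] */
	xm.xm_offset = 16;
	scsipi_async_event(chan, ASYNC_EVENT_XFER_MODE, &xm);
#endif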
2172 
2173 /*
2174  * scsipi_print_xfer_mode:
2175  *
2176  *	Print a periph's capabilities.
2177  */
2178 void
2179 scsipi_print_xfer_mode(struct scsipi_periph *periph)
2180 {
2181 	int period, freq, speed, mbs;
2182 
2183 	if ((periph->periph_flags & PERIPH_MODE_VALID) == 0)
2184 		return;
2185 
2186 	aprint_normal_dev(periph->periph_dev, "");
2187 	if (periph->periph_mode & (PERIPH_CAP_SYNC | PERIPH_CAP_DT)) {
2188 		period = scsipi_sync_factor_to_period(periph->periph_period);
2189 		aprint_normal("sync (%d.%02dns offset %d)",
2190 		    period / 100, period % 100, periph->periph_offset);
2191 	} else
2192 		aprint_normal("async");
2193 
2194 	if (periph->periph_mode & PERIPH_CAP_WIDE32)
2195 		aprint_normal(", 32-bit");
2196 	else if (periph->periph_mode & (PERIPH_CAP_WIDE16 | PERIPH_CAP_DT))
2197 		aprint_normal(", 16-bit");
2198 	else
2199 		aprint_normal(", 8-bit");
2200 
2201 	if (periph->periph_mode & (PERIPH_CAP_SYNC | PERIPH_CAP_DT)) {
2202 		freq = scsipi_sync_factor_to_freq(periph->periph_period);
2203 		speed = freq;
2204 		if (periph->periph_mode & PERIPH_CAP_WIDE32)
2205 			speed *= 4;
2206 		else if (periph->periph_mode &
2207 		    (PERIPH_CAP_WIDE16 | PERIPH_CAP_DT))
2208 			speed *= 2;
2209 		mbs = speed / 1000;
2210 		if (mbs > 0)
2211 			aprint_normal(" (%d.%03dMB/s)", mbs, speed % 1000);
2212 		else
2213 			aprint_normal(" (%dKB/s)", speed % 1000);
2214 	}
2215 
2216 	aprint_normal(" transfers");
2217 
2218 	if (periph->periph_mode & PERIPH_CAP_TQING)
2219 		aprint_normal(", tagged queueing");
2220 
2221 	aprint_normal("\n");
2222 }
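
/*
 * Worked example for the arithmetic above: a periph negotiated to
 * sync factor 0x0a (period 2500, i.e. 25.00 ns) and 16-bit wide gives
 * freq = 100000000 / 2500 = 40000 (kHz), speed = 40000 * 2 = 80000,
 * mbs = 80, and prints
 * "sync (25.00ns offset N), 16-bit (80.000MB/s) transfers".
 */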
2223 
2224 /*
2225  * scsipi_async_event_max_openings:
2226  *
2227  *	Update the maximum number of outstanding commands a
2228  *	device may have.
2229  */
2230 static void
2231 scsipi_async_event_max_openings(struct scsipi_channel *chan,
2232     struct scsipi_max_openings *mo)
2233 {
2234 	struct scsipi_periph *periph;
2235 	int minlun, maxlun;
2236 
2237 	if (mo->mo_lun == -1) {
2238 		/*
2239 		 * Wildcarded; apply it to all LUNs.
2240 		 */
2241 		minlun = 0;
2242 		maxlun = chan->chan_nluns - 1;
2243 	} else
2244 		minlun = maxlun = mo->mo_lun;
2245 
2246 	/* XXX This could really suck with a large LUN space. */
2247 	for (; minlun <= maxlun; minlun++) {
2248 		periph = scsipi_lookup_periph(chan, mo->mo_target, minlun);
2249 		if (periph == NULL)
2250 			continue;
2251 
2252 		if (mo->mo_openings < periph->periph_openings)
2253 			periph->periph_openings = mo->mo_openings;
2254 		else if (mo->mo_openings > periph->periph_openings &&
2255 		    (periph->periph_flags & PERIPH_GROW_OPENINGS) != 0)
2256 			periph->periph_openings = mo->mo_openings;
2257 	}
2258 }
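
/*
 * Illustrative sketch (not part of this file): an adapter that ran
 * out of command slots for target 2 could clamp that target's
 * openings across all LUNs like this.  The values are examples only.
 */
#if 0
	struct scsipi_max_openings mo;

	mo.mo_target = 2;
	mo.mo_lun = -1;		/* wildcard: apply to every LUN */
	mo.mo_openings = 4;
	scsipi_async_event(chan, ASYNC_EVENT_MAX_OPENINGS, &mo);
#endif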
2259 
2260 /*
2261  * scsipi_async_event_xfer_mode:
2262  *
2263  *	Update the xfer mode for all periphs sharing the
2264  *	specified I_T Nexus.
2265  */
2266 static void
2267 scsipi_async_event_xfer_mode(struct scsipi_channel *chan,
2268     struct scsipi_xfer_mode *xm)
2269 {
2270 	struct scsipi_periph *periph;
2271 	int lun, announce, mode, period, offset;
2272 
2273 	for (lun = 0; lun < chan->chan_nluns; lun++) {
2274 		periph = scsipi_lookup_periph(chan, xm->xm_target, lun);
2275 		if (periph == NULL)
2276 			continue;
2277 		announce = 0;
2278 
2279 		/*
2280 		 * Clamp the xfer mode down to this periph's capabilities.
2281 		 */
2282 		mode = xm->xm_mode & periph->periph_cap;
2283 		if (mode & PERIPH_CAP_SYNC) {
2284 			period = xm->xm_period;
2285 			offset = xm->xm_offset;
2286 		} else {
2287 			period = 0;
2288 			offset = 0;
2289 		}
2290 
2291 		/*
2292 		 * If we do not have a valid xfer mode yet, or the parameters
2293 		 * are different, announce them.
2294 		 */
2295 		if ((periph->periph_flags & PERIPH_MODE_VALID) == 0 ||
2296 		    periph->periph_mode != mode ||
2297 		    periph->periph_period != period ||
2298 		    periph->periph_offset != offset)
2299 			announce = 1;
2300 
2301 		periph->periph_mode = mode;
2302 		periph->periph_period = period;
2303 		periph->periph_offset = offset;
2304 		periph->periph_flags |= PERIPH_MODE_VALID;
2305 
2306 		if (announce)
2307 			scsipi_print_xfer_mode(periph);
2308 	}
2309 }
2310 
2311 /*
2312  * scsipi_set_xfer_mode:
2313  *
2314  *	Set the xfer mode for the specified I_T Nexus.
2315  */
2316 void
2317 scsipi_set_xfer_mode(struct scsipi_channel *chan, int target, int immed)
2318 {
2319 	struct scsipi_xfer_mode xm;
2320 	struct scsipi_periph *itperiph;
2321 	int lun, s;
2322 
2323 	/*
2324 	 * Go to the minimal xfer mode.
2325 	 */
2326 	xm.xm_target = target;
2327 	xm.xm_mode = 0;
2328 	xm.xm_period = 0;			/* ignored */
2329 	xm.xm_offset = 0;			/* ignored */
2330 
2331 	/*
2332 	 * Find the first LUN we know about on this I_T Nexus.
2333 	 */
2334 	for (itperiph = NULL, lun = 0; lun < chan->chan_nluns; lun++) {
2335 		itperiph = scsipi_lookup_periph(chan, target, lun);
2336 		if (itperiph != NULL)
2337 			break;
2338 	}
2339 	if (itperiph != NULL) {
2340 		xm.xm_mode = itperiph->periph_cap;
2341 		/*
2342 		 * Now issue the request to the adapter.
2343 		 */
2344 		s = splbio();
2345 		scsipi_adapter_request(chan, ADAPTER_REQ_SET_XFER_MODE, &xm);
2346 		splx(s);
2347 		/*
2348 		 * If we want this to happen immediately, issue a dummy
2349 		 * command, since most adapters can't really negotiate unless
2350 		 * they're executing a job.
2351 		 */
2352 		if (immed != 0) {
2353 			(void) scsipi_test_unit_ready(itperiph,
2354 			    XS_CTL_DISCOVERY | XS_CTL_IGNORE_ILLEGAL_REQUEST |
2355 			    XS_CTL_IGNORE_NOT_READY |
2356 			    XS_CTL_IGNORE_MEDIA_CHANGE);
2357 		}
2358 	}
2359 }
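
/*
 * Illustrative sketch (not part of this file): requesting
 * renegotiation for target 1 and forcing it to happen now by way of
 * the dummy TEST UNIT READY described above:
 *
 *	scsipi_set_xfer_mode(chan, 1, 1);
 */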
2360 
2361 /*
2362  * scsipi_async_event_channel_reset:
2363  *
2364  *	Handle a SCSI bus reset.
2365  *	Called at splbio().
2366  */
2367 static void
2368 scsipi_async_event_channel_reset(struct scsipi_channel *chan)
2369 {
2370 	struct scsipi_xfer *xs, *xs_next;
2371 	struct scsipi_periph *periph;
2372 	int target, lun;
2373 
2374 	/*
2375 	 * Channel has been reset.  Also mark pending REQUEST_SENSE
2376 	 * commands as reset, since their sense data is no longer available.
2377 	 * We can't call scsipi_done() from here, as the commands have not
2378 	 * been sent to the adapter yet (doing so would corrupt accounting).
2379 	 */
2380 
2381 	for (xs = TAILQ_FIRST(&chan->chan_queue); xs != NULL; xs = xs_next) {
2382 		xs_next = TAILQ_NEXT(xs, channel_q);
2383 		if (xs->xs_control & XS_CTL_REQSENSE) {
2384 			TAILQ_REMOVE(&chan->chan_queue, xs, channel_q);
2385 			xs->error = XS_RESET;
2386 			if ((xs->xs_control & XS_CTL_ASYNC) != 0)
2387 				TAILQ_INSERT_TAIL(&chan->chan_complete, xs,
2388 				    channel_q);
2389 		}
2390 	}
2391 	wakeup(&chan->chan_complete);
2392 	/* Catch xs with pending sense which may not have a REQSENSE xs yet */
2393 	for (target = 0; target < chan->chan_ntargets; target++) {
2394 		if (target == chan->chan_id)
2395 			continue;
2396 		for (lun = 0; lun < chan->chan_nluns; lun++) {
2397 			periph = scsipi_lookup_periph(chan, target, lun);
2398 			if (periph) {
2399 				xs = periph->periph_xscheck;
2400 				if (xs)
2401 					xs->error = XS_RESET;
2402 			}
2403 		}
2404 	}
2405 }
2406 
2407 /*
2408  * scsipi_target_detach:
2409  *
2410  *	detach all periph associated with a I_T
2411  *	Detach all periphs associated with an I_T nexus.
2412  *	Must be called from a valid thread context.
2413 int
2414 scsipi_target_detach(struct scsipi_channel *chan, int target, int lun,
2415     int flags)
2416 {
2417 	struct scsipi_periph *periph;
2418 	int ctarget, mintarget, maxtarget;
2419 	int clun, minlun, maxlun;
2420 	int error;
2421 
2422 	if (target == -1) {
2423 		mintarget = 0;
2424 		maxtarget = chan->chan_ntargets;
2425 	} else {
2426 		if (target == chan->chan_id)
2427 			return EINVAL;
2428 		if (target < 0 || target >= chan->chan_ntargets)
2429 			return EINVAL;
2430 		mintarget = target;
2431 		maxtarget = target + 1;
2432 	}
2433 
2434 	if (lun == -1) {
2435 		minlun = 0;
2436 		maxlun = chan->chan_nluns;
2437 	} else {
2438 		if (lun < 0 || lun >= chan->chan_nluns)
2439 			return EINVAL;
2440 		minlun = lun;
2441 		maxlun = lun + 1;
2442 	}
2443 
2444 	for (ctarget = mintarget; ctarget < maxtarget; ctarget++) {
2445 		if (ctarget == chan->chan_id)
2446 			continue;
2447 
2448 		for (clun = minlun; clun < maxlun; clun++) {
2449 			periph = scsipi_lookup_periph(chan, ctarget, clun);
2450 			if (periph == NULL)
2451 				continue;
2452 			error = config_detach(periph->periph_dev, flags);
2453 			if (error)
2454 				return (error);
2455 		}
2456 	}
2457 	return (0);
2458 }
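
/*
 * Illustrative sketch (not part of this file): detaching every LUN of
 * target 4, forcing the detach.  DETACH_FORCE comes from sys/device.h;
 * error handling is up to the caller.
 */
#if 0
	int error;

	error = scsipi_target_detach(chan, 4, -1, DETACH_FORCE);
	if (error)
		printf("detach failed: %d\n", error);
#endif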
2459 
2460 /*
2461  * scsipi_adapter_addref:
2462  *
2463  *	Add a reference to the adapter pointed to by the provided
2464  *	link, enabling the adapter if necessary.
2465  */
2466 int
2467 scsipi_adapter_addref(struct scsipi_adapter *adapt)
2468 {
2469 	int s, error = 0;
2470 
2471 	s = splbio();
2472 	if (adapt->adapt_refcnt++ == 0 && adapt->adapt_enable != NULL) {
2473 		error = (*adapt->adapt_enable)(adapt->adapt_dev, 1);
2474 		if (error)
2475 			adapt->adapt_refcnt--;
2476 	}
2477 	splx(s);
2478 	return (error);
2479 }
2480 
2481 /*
2482  * scsipi_adapter_delref:
2483  *
2484  *	Delete a reference to the adapter pointed to by the provided
2485  *	link, disabling the adapter if possible.
2486  */
2487 void
2488 scsipi_adapter_delref(struct scsipi_adapter *adapt)
2489 {
2490 	int s;
2491 
2492 	s = splbio();
2493 	if (adapt->adapt_refcnt-- == 1 && adapt->adapt_enable != NULL)
2494 		(void) (*adapt->adapt_enable)(adapt->adapt_dev, 0);
2495 	splx(s);
2496 }
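
/*
 * Illustrative sketch (not part of this file): the addref/delref pair
 * brackets periods during which a periph actually needs the adapter,
 * e.g. across open/close, so adapters that supply an adapt_enable hook
 * can be powered down when idle.  The open routine is hypothetical.
 */
#if 0
static int
example_open(struct scsipi_periph *periph)
{
	struct scsipi_adapter *adapt =
	    periph->periph_channel->chan_adapter;
	int error;

	if ((error = scsipi_adapter_addref(adapt)) != 0)
		return error;
	/* ... use the device; scsipi_adapter_delref(adapt) on close ... */
	return 0;
}
#endif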
2497 
2498 static struct scsipi_syncparam {
2499 	int	ss_factor;
2500 	int	ss_period;	/* ns * 100 */
2501 } scsipi_syncparams[] = {
2502 	{ 0x08,		 625 },	/* FAST-160 (Ultra320) */
2503 	{ 0x09,		1250 },	/* FAST-80 (Ultra160) */
2504 	{ 0x0a,		2500 },	/* FAST-40 40MHz (Ultra2) */
2505 	{ 0x0b,		3030 },	/* FAST-40 33MHz (Ultra2) */
2506 	{ 0x0c,		5000 },	/* FAST-20 (Ultra) */
2507 };
2508 static const int scsipi_nsyncparams =
2509     sizeof(scsipi_syncparams) / sizeof(scsipi_syncparams[0]);
2510 
2511 int
2512 scsipi_sync_period_to_factor(int period /* ns * 100 */)
2513 {
2514 	int i;
2515 
2516 	for (i = 0; i < scsipi_nsyncparams; i++) {
2517 		if (period <= scsipi_syncparams[i].ss_period)
2518 			return (scsipi_syncparams[i].ss_factor);
2519 	}
2520 
2521 	return ((period / 100) / 4);
2522 }
2523 
2524 int
2525 scsipi_sync_factor_to_period(int factor)
2526 {
2527 	int i;
2528 
2529 	for (i = 0; i < scsipi_nsyncparams; i++) {
2530 		if (factor == scsipi_syncparams[i].ss_factor)
2531 			return (scsipi_syncparams[i].ss_period);
2532 	}
2533 
2534 	return ((factor * 4) * 100);
2535 }
2536 
2537 int
2538 scsipi_sync_factor_to_freq(int factor)
2539 {
2540 	int i;
2541 
2542 	for (i = 0; i < scsipi_nsyncparams; i++) {
2543 		if (factor == scsipi_syncparams[i].ss_factor)
2544 			return (100000000 / scsipi_syncparams[i].ss_period);
2545 	}
2546 
2547 	return (10000000 / ((factor * 4) * 10));
2548 }
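
/*
 * Worked examples for the conversions above, using the table and the
 * fall-back formulas:
 *
 *	scsipi_sync_period_to_factor(2500) == 0x0a	(25 ns, FAST-40)
 *	scsipi_sync_factor_to_period(0x0c) == 5000	(50 ns, FAST-20)
 *	scsipi_sync_factor_to_freq(0x0c)   == 20000	(kHz, i.e. 20 MHz)
 *
 * Outside the table the fall-backs implement the classic SCSI-2 rule
 * of period = factor * 4 ns, e.g. factor 50 -> 200 ns -> 5000 kHz.
 */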
2549 
2550 #ifdef SCSIPI_DEBUG
2551 /*
2552  * Given a scsipi_xfer, dump the request in all its glory.
2553  */
2554 void
2555 show_scsipi_xs(struct scsipi_xfer *xs)
2556 {
2557 
2558 	printf("xs(%p): ", xs);
2559 	printf("xs_control(0x%08x)", xs->xs_control);
2560 	printf("xs_status(0x%08x)", xs->xs_status);
2561 	printf("periph(%p)", xs->xs_periph);
2562 	printf("retr(0x%x)", xs->xs_retries);
2563 	printf("timo(0x%x)", xs->timeout);
2564 	printf("cmd(%p)", xs->cmd);
2565 	printf("len(0x%x)", xs->cmdlen);
2566 	printf("data(%p)", xs->data);
2567 	printf("len(0x%x)", xs->datalen);
2568 	printf("res(0x%x)", xs->resid);
2569 	printf("err(0x%x)", xs->error);
2570 	printf("bp(%p)", xs->bp);
2571 	show_scsipi_cmd(xs);
2572 }
2573 
2574 void
2575 show_scsipi_cmd(struct scsipi_xfer *xs)
2576 {
2577 	u_char *b = (u_char *) xs->cmd;
2578 	int i = 0;
2579 
2580 	scsipi_printaddr(xs->xs_periph);
2581 	printf(" command: ");
2582 
2583 	if ((xs->xs_control & XS_CTL_RESET) == 0) {
2584 		while (i < xs->cmdlen) {
2585 			if (i)
2586 				printf(",");
2587 			printf("0x%x", b[i++]);
2588 		}
2589 		printf("-[%d bytes]\n", xs->datalen);
2590 		if (xs->datalen)
2591 			show_mem(xs->data, min(64, xs->datalen));
2592 	} else
2593 		printf("-RESET-\n");
2594 }
2595 
2596 void
2597 show_mem(u_char *address, int num)
2598 {
2599 	int x;
2600 
2601 	printf("------------------------------");
2602 	for (x = 0; x < num; x++) {
2603 		if ((x % 16) == 0)
2604 			printf("\n%03d: ", x);
2605 		printf("%02x ", *address++);
2606 	}
2607 	printf("\n------------------------------\n");
2608 }
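
/*
 * For reference, show_mem() output looks like this for a 20-byte
 * buffer (hex values are examples):
 *
 *	------------------------------
 *	000: 00 01 02 03 04 05 06 07 08 09 0a 0b 0c 0d 0e 0f
 *	016: 10 11 12 13
 *	------------------------------
 */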
2609 #endif /* SCSIPI_DEBUG */
2610