1 /*	$NetBSD: scsipi_base.c,v 1.112 2004/09/09 19:35:31 bouyer Exp $	*/
2 
3 /*-
4  * Copyright (c) 1998, 1999, 2000, 2002, 2003 The NetBSD Foundation, Inc.
5  * All rights reserved.
6  *
7  * This code is derived from software contributed to The NetBSD Foundation
8  * by Charles M. Hannum; by Jason R. Thorpe of the Numerical Aerospace
9  * Simulation Facility, NASA Ames Research Center.
10  *
11  * Redistribution and use in source and binary forms, with or without
12  * modification, are permitted provided that the following conditions
13  * are met:
14  * 1. Redistributions of source code must retain the above copyright
15  *    notice, this list of conditions and the following disclaimer.
16  * 2. Redistributions in binary form must reproduce the above copyright
17  *    notice, this list of conditions and the following disclaimer in the
18  *    documentation and/or other materials provided with the distribution.
19  * 3. All advertising materials mentioning features or use of this software
20  *    must display the following acknowledgement:
21  *        This product includes software developed by the NetBSD
22  *        Foundation, Inc. and its contributors.
23  * 4. Neither the name of The NetBSD Foundation nor the names of its
24  *    contributors may be used to endorse or promote products derived
25  *    from this software without specific prior written permission.
26  *
27  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
28  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
29  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
30  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
31  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
32  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
33  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
34  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
35  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
36  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
37  * POSSIBILITY OF SUCH DAMAGE.
38  */
39 
40 #include <sys/cdefs.h>
41 __KERNEL_RCSID(0, "$NetBSD: scsipi_base.c,v 1.112 2004/09/09 19:35:31 bouyer Exp $");
42 
43 #include "opt_scsi.h"
44 
45 #include <sys/param.h>
46 #include <sys/systm.h>
47 #include <sys/kernel.h>
48 #include <sys/buf.h>
49 #include <sys/uio.h>
50 #include <sys/malloc.h>
51 #include <sys/pool.h>
52 #include <sys/errno.h>
53 #include <sys/device.h>
54 #include <sys/proc.h>
55 #include <sys/kthread.h>
56 #include <sys/hash.h>
57 
58 #include <uvm/uvm_extern.h>
59 
60 #include <dev/scsipi/scsipi_all.h>
61 #include <dev/scsipi/scsipi_disk.h>
62 #include <dev/scsipi/scsipiconf.h>
63 #include <dev/scsipi/scsipi_base.h>
64 
65 #include <dev/scsipi/scsi_all.h>
66 #include <dev/scsipi/scsi_message.h>
67 
68 static int	scsipi_complete(struct scsipi_xfer *);
69 static void	scsipi_request_sense(struct scsipi_xfer *);
70 static int	scsipi_enqueue(struct scsipi_xfer *);
71 static void	scsipi_run_queue(struct scsipi_channel *chan);
72 
73 static void	scsipi_completion_thread(void *);
74 
75 static void	scsipi_get_tag(struct scsipi_xfer *);
76 static void	scsipi_put_tag(struct scsipi_xfer *);
77 
78 static int	scsipi_get_resource(struct scsipi_channel *);
79 static void	scsipi_put_resource(struct scsipi_channel *);
80 
81 static void	scsipi_async_event_max_openings(struct scsipi_channel *,
82 		    struct scsipi_max_openings *);
83 static void	scsipi_async_event_xfer_mode(struct scsipi_channel *,
84 		    struct scsipi_xfer_mode *);
85 static void	scsipi_async_event_channel_reset(struct scsipi_channel *);
86 
87 static struct pool scsipi_xfer_pool;
88 
89 /*
90  * scsipi_init:
91  *
92  *	Called when a scsibus or atapibus is attached to the system
93  *	to initialize shared data structures.
94  */
95 void
96 scsipi_init(void)
97 {
98 	static int scsipi_init_done;
99 
100 	if (scsipi_init_done)
101 		return;
102 	scsipi_init_done = 1;
103 
104 	/* Initialize the scsipi_xfer pool. */
105 	pool_init(&scsipi_xfer_pool, sizeof(struct scsipi_xfer), 0,
106 	    0, 0, "scxspl", NULL);
107 	if (pool_prime(&scsipi_xfer_pool,
108 	    PAGE_SIZE / sizeof(struct scsipi_xfer)) == ENOMEM) {
109 		printf("WARNING: not enough memory for scsipi_xfer_pool\n");
110 	}
111 }
112 
113 /*
114  * scsipi_channel_init:
115  *
116  *	Initialize a scsipi_channel when it is attached.
117  */
118 int
119 scsipi_channel_init(struct scsipi_channel *chan)
120 {
121 	int i;
122 
123 	/* Initialize shared data. */
124 	scsipi_init();
125 
126 	/* Initialize the queues. */
127 	TAILQ_INIT(&chan->chan_queue);
128 	TAILQ_INIT(&chan->chan_complete);
129 
130 	for (i = 0; i < SCSIPI_CHAN_PERIPH_BUCKETS; i++)
131 		LIST_INIT(&chan->chan_periphtab[i]);
132 
133 	/*
134 	 * Create the asynchronous completion thread.
135 	 */
136 	kthread_create(scsipi_create_completion_thread, chan);
137 	return (0);
138 }
139 
140 /*
141  * scsipi_channel_shutdown:
142  *
143  *	Shutdown a scsipi_channel.
144  */
145 void
146 scsipi_channel_shutdown(struct scsipi_channel *chan)
147 {
148 
149 	/*
150 	 * Shut down the completion thread.
151 	 */
152 	chan->chan_tflags |= SCSIPI_CHANT_SHUTDOWN;
153 	wakeup(&chan->chan_complete);
154 
155 	/*
156 	 * Now wait for the thread to exit.
157 	 */
158 	while (chan->chan_thread != NULL)
159 		(void) tsleep(&chan->chan_thread, PRIBIO, "scshut", 0);
160 }
161 
162 static uint32_t
163 scsipi_chan_periph_hash(uint64_t t, uint64_t l)
164 {
165 	uint32_t hash;
166 
167 	hash = hash32_buf(&t, sizeof(t), HASH32_BUF_INIT);
168 	hash = hash32_buf(&l, sizeof(l), hash);
169 
170 	return (hash & SCSIPI_CHAN_PERIPH_HASHMASK);
171 }
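
/*
 * Illustrative note (exposition only, not driver code): insertion and
 * lookup must hash the same way so they agree on a bucket; a periph at
 * target 2, LUN 0 always lives on the list
 *
 *	chan->chan_periphtab[scsipi_chan_periph_hash(2, 0)]
 *
 * which is what scsipi_insert_periph() and scsipi_lookup_periph()
 * below rely on.
 */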
172 
173 /*
174  * scsipi_insert_periph:
175  *
176  *	Insert a periph into the channel.
177  */
178 void
179 scsipi_insert_periph(struct scsipi_channel *chan, struct scsipi_periph *periph)
180 {
181 	uint32_t hash;
182 	int s;
183 
184 	hash = scsipi_chan_periph_hash(periph->periph_target,
185 	    periph->periph_lun);
186 
187 	s = splbio();
188 	LIST_INSERT_HEAD(&chan->chan_periphtab[hash], periph, periph_hash);
189 	splx(s);
190 }
191 
192 /*
193  * scsipi_remove_periph:
194  *
195  *	Remove a periph from the channel.
196  */
197 void
198 scsipi_remove_periph(struct scsipi_channel *chan, struct scsipi_periph *periph)
199 {
200 	int s;
201 
202 	s = splbio();
203 	LIST_REMOVE(periph, periph_hash);
204 	splx(s);
205 }
206 
207 /*
208  * scsipi_lookup_periph:
209  *
210  *	Lookup a periph on the specified channel.
211  */
212 struct scsipi_periph *
213 scsipi_lookup_periph(struct scsipi_channel *chan, int target, int lun)
214 {
215 	struct scsipi_periph *periph;
216 	uint32_t hash;
217 	int s;
218 
219 	if (target >= chan->chan_ntargets ||
220 	    lun >= chan->chan_nluns)
221 		return (NULL);
222 
223 	hash = scsipi_chan_periph_hash(target, lun);
224 
225 	s = splbio();
226 	LIST_FOREACH(periph, &chan->chan_periphtab[hash], periph_hash) {
227 		if (periph->periph_target == target &&
228 		    periph->periph_lun == lun)
229 			break;
230 	}
231 	splx(s);
232 
233 	return (periph);
234 }
235 
236 /*
237  * scsipi_get_resource:
238  *
239  *	Allocate a single xfer `resource' from the channel.
240  *
241  *	NOTE: Must be called at splbio().
242  */
243 static int
244 scsipi_get_resource(struct scsipi_channel *chan)
245 {
246 	struct scsipi_adapter *adapt = chan->chan_adapter;
247 
248 	if (chan->chan_flags & SCSIPI_CHAN_OPENINGS) {
249 		if (chan->chan_openings > 0) {
250 			chan->chan_openings--;
251 			return (1);
252 		}
253 		return (0);
254 	}
255 
256 	if (adapt->adapt_openings > 0) {
257 		adapt->adapt_openings--;
258 		return (1);
259 	}
260 	return (0);
261 }
262 
263 /*
264  * scsipi_grow_resources:
265  *
266  *	Attempt to grow resources for a channel.  If this succeeds,
267  *	we allocate one for our caller.
268  *
269  *	NOTE: Must be called at splbio().
270  */
271 static __inline int
272 scsipi_grow_resources(struct scsipi_channel *chan)
273 {
274 
275 	if (chan->chan_flags & SCSIPI_CHAN_CANGROW) {
276 		if ((chan->chan_flags & SCSIPI_CHAN_TACTIVE) == 0) {
277 			scsipi_adapter_request(chan,
278 			    ADAPTER_REQ_GROW_RESOURCES, NULL);
279 			return (scsipi_get_resource(chan));
280 		}
281 		/*
282 		 * Ask the channel thread to do it. It'll have to thaw the
283 		 * queue.
284 		 */
285 		scsipi_channel_freeze(chan, 1);
286 		chan->chan_tflags |= SCSIPI_CHANT_GROWRES;
287 		wakeup(&chan->chan_complete);
288 		return (0);
289 	}
290 
291 	return (0);
292 }
293 
294 /*
295  * scsipi_put_resource:
296  *
297  *	Free a single xfer `resource' to the channel.
298  *
299  *	NOTE: Must be called at splbio().
300  */
301 static void
302 scsipi_put_resource(struct scsipi_channel *chan)
303 {
304 	struct scsipi_adapter *adapt = chan->chan_adapter;
305 
306 	if (chan->chan_flags & SCSIPI_CHAN_OPENINGS)
307 		chan->chan_openings++;
308 	else
309 		adapt->adapt_openings++;
310 }
311 
312 /*
313  * scsipi_get_tag:
314  *
315  *	Get a tag ID for the specified xfer.
316  *
317  *	NOTE: Must be called at splbio().
318  */
319 static void
320 scsipi_get_tag(struct scsipi_xfer *xs)
321 {
322 	struct scsipi_periph *periph = xs->xs_periph;
323 	int bit, tag;
324 	u_int word;
325 
326 	bit = 0;	/* XXX gcc */
327 	for (word = 0; word < PERIPH_NTAGWORDS; word++) {
328 		bit = ffs(periph->periph_freetags[word]);
329 		if (bit != 0)
330 			break;
331 	}
332 #ifdef DIAGNOSTIC
333 	if (word == PERIPH_NTAGWORDS) {
334 		scsipi_printaddr(periph);
335 		printf("no free tags\n");
336 		panic("scsipi_get_tag");
337 	}
338 #endif
339 
340 	bit -= 1;
341 	periph->periph_freetags[word] &= ~(1 << bit);
342 	tag = (word << 5) | bit;
343 
344 	/* XXX Should eventually disallow this completely. */
345 	if (tag >= periph->periph_openings) {
346 		scsipi_printaddr(periph);
347 		printf("WARNING: tag %d greater than available openings %d\n",
348 		    tag, periph->periph_openings);
349 	}
350 
351 	xs->xs_tag_id = tag;
352 }
353 
354 /*
355  * scsipi_put_tag:
356  *
357  *	Put the tag ID for the specified xfer back into the pool.
358  *
359  *	NOTE: Must be called at splbio().
360  */
361 static void
362 scsipi_put_tag(struct scsipi_xfer *xs)
363 {
364 	struct scsipi_periph *periph = xs->xs_periph;
365 	int word, bit;
366 
367 	word = xs->xs_tag_id >> 5;
368 	bit = xs->xs_tag_id & 0x1f;
369 
370 	periph->periph_freetags[word] |= (1 << bit);
371 }
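
/*
 * Worked example of the tag encoding shared by scsipi_get_tag() and
 * scsipi_put_tag(): a tag ID packs a word index and a bit index into
 * the periph_freetags bitmap, so tag 37 decodes as word 37 >> 5 == 1
 * and bit 37 & 0x1f == 5, i.e. bit 5 of periph_freetags[1].
 */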
372 
373 /*
374  * scsipi_get_xs:
375  *
376  *	Allocate an xfer descriptor and associate it with the
377  *	specified peripheral.  If the peripheral has no more
378  *	available command openings, we either block waiting for
379  *	one to become available, or fail.
380  */
381 struct scsipi_xfer *
382 scsipi_get_xs(struct scsipi_periph *periph, int flags)
383 {
384 	struct scsipi_xfer *xs;
385 	int s;
386 
387 	SC_DEBUG(periph, SCSIPI_DB3, ("scsipi_get_xs\n"));
388 
389 	/*
390 	 * If we're cold, make sure we poll.
391 	 */
392 	if (cold)
393 		flags |= XS_CTL_NOSLEEP | XS_CTL_POLL;
394 
395 #ifdef DIAGNOSTIC
396 	/*
397 	 * URGENT commands can never be ASYNC.
398 	 */
399 	if ((flags & (XS_CTL_URGENT|XS_CTL_ASYNC)) ==
400 	    (XS_CTL_URGENT|XS_CTL_ASYNC)) {
401 		scsipi_printaddr(periph);
402 		printf("URGENT and ASYNC\n");
403 		panic("scsipi_get_xs");
404 	}
405 #endif
406 
407 	s = splbio();
408 	/*
409 	 * Wait for a command opening to become available.  Rules:
410 	 *
411 	 *	- All xfers must wait for an available opening.
412 	 *	  Exception: URGENT xfers can proceed when
413 	 *	  active == openings, because we use the opening
414 	 *	  of the command we're recovering for.
415 	 *	- If the periph has sense pending, only URGENT & REQSENSE
416 	 *	  xfers may proceed.
417 	 *
418 	 *	- If the periph is recovering, only URGENT xfers may
419 	 *	  proceed.
420 	 *
421 	 *	- If the periph is currently executing a recovery
422 	 *	  command, URGENT commands must block, because only
423 	 *	  one recovery command can execute at a time.
424 	 */
425 	for (;;) {
426 		if (flags & XS_CTL_URGENT) {
427 			if (periph->periph_active > periph->periph_openings)
428 				goto wait_for_opening;
429 			if (periph->periph_flags & PERIPH_SENSE) {
430 				if ((flags & XS_CTL_REQSENSE) == 0)
431 					goto wait_for_opening;
432 			} else {
433 				if ((periph->periph_flags &
434 				    PERIPH_RECOVERY_ACTIVE) != 0)
435 					goto wait_for_opening;
436 				periph->periph_flags |= PERIPH_RECOVERY_ACTIVE;
437 			}
438 			break;
439 		}
440 		if (periph->periph_active >= periph->periph_openings ||
441 		    (periph->periph_flags & PERIPH_RECOVERING) != 0)
442 			goto wait_for_opening;
443 		periph->periph_active++;
444 		break;
445 
446  wait_for_opening:
447 		if (flags & XS_CTL_NOSLEEP) {
448 			splx(s);
449 			return (NULL);
450 		}
451 		SC_DEBUG(periph, SCSIPI_DB3, ("sleeping\n"));
452 		periph->periph_flags |= PERIPH_WAITING;
453 		(void) tsleep(periph, PRIBIO, "getxs", 0);
454 	}
455 	SC_DEBUG(periph, SCSIPI_DB3, ("calling pool_get\n"));
456 	xs = pool_get(&scsipi_xfer_pool,
457 	    ((flags & XS_CTL_NOSLEEP) != 0 ? PR_NOWAIT : PR_WAITOK));
458 	if (xs == NULL) {
459 		if (flags & XS_CTL_URGENT) {
460 			if ((flags & XS_CTL_REQSENSE) == 0)
461 				periph->periph_flags &= ~PERIPH_RECOVERY_ACTIVE;
462 		} else
463 			periph->periph_active--;
464 		scsipi_printaddr(periph);
465 		printf("unable to allocate %sscsipi_xfer\n",
466 		    (flags & XS_CTL_URGENT) ? "URGENT " : "");
467 	}
468 	splx(s);
469 
470 	SC_DEBUG(periph, SCSIPI_DB3, ("returning\n"));
471 
472 	if (xs != NULL) {
473 		memset(xs, 0, sizeof(*xs));
474 		callout_init(&xs->xs_callout);
475 		xs->xs_periph = periph;
476 		xs->xs_control = flags;
477 		xs->xs_status = 0;
478 		s = splbio();
479 		TAILQ_INSERT_TAIL(&periph->periph_xferq, xs, device_q);
480 		splx(s);
481 	}
482 	return (xs);
483 }
484 
485 /*
486  * scsipi_put_xs:
487  *
488  *	Release an xfer descriptor, decreasing the outstanding command
489  *	count for the peripheral.  If there is a thread waiting for
490  *	an opening, wake it up.  If not, kick any queued I/O the
491  *	peripheral may have.
492  *
493  *	NOTE: Must be called at splbio().
494  */
495 void
496 scsipi_put_xs(struct scsipi_xfer *xs)
497 {
498 	struct scsipi_periph *periph = xs->xs_periph;
499 	int flags = xs->xs_control;
500 
501 	SC_DEBUG(periph, SCSIPI_DB3, ("scsipi_free_xs\n"));
502 
503 	TAILQ_REMOVE(&periph->periph_xferq, xs, device_q);
504 	pool_put(&scsipi_xfer_pool, xs);
505 
506 #ifdef DIAGNOSTIC
507 	if ((periph->periph_flags & PERIPH_RECOVERY_ACTIVE) != 0 &&
508 	    periph->periph_active == 0) {
509 		scsipi_printaddr(periph);
510 		printf("recovery without a command to recover for\n");
511 		panic("scsipi_put_xs");
512 	}
513 #endif
514 
515 	if (flags & XS_CTL_URGENT) {
516 		if ((flags & XS_CTL_REQSENSE) == 0)
517 			periph->periph_flags &= ~PERIPH_RECOVERY_ACTIVE;
518 	} else
519 		periph->periph_active--;
520 	if (periph->periph_active == 0 &&
521 	    (periph->periph_flags & PERIPH_WAITDRAIN) != 0) {
522 		periph->periph_flags &= ~PERIPH_WAITDRAIN;
523 		wakeup(&periph->periph_active);
524 	}
525 
526 	if (periph->periph_flags & PERIPH_WAITING) {
527 		periph->periph_flags &= ~PERIPH_WAITING;
528 		wakeup(periph);
529 	} else {
530 		if (periph->periph_switch->psw_start != NULL &&
531 		    (periph->periph_dev->dv_flags & DVF_ACTIVE)) {
532 			SC_DEBUG(periph, SCSIPI_DB2,
533 			    ("calling private start()\n"));
534 			(*periph->periph_switch->psw_start)(periph);
535 		}
536 	}
537 }
538 
539 /*
540  * scsipi_channel_freeze:
541  *
542  *	Freeze a channel's xfer queue.
543  */
544 void
545 scsipi_channel_freeze(struct scsipi_channel *chan, int count)
546 {
547 	int s;
548 
549 	s = splbio();
550 	chan->chan_qfreeze += count;
551 	splx(s);
552 }
553 
554 /*
555  * scsipi_channel_thaw:
556  *
557  *	Thaw a channel's xfer queue.
558  */
559 void
560 scsipi_channel_thaw(struct scsipi_channel *chan, int count)
561 {
562 	int s;
563 
564 	s = splbio();
565 	chan->chan_qfreeze -= count;
566 	/*
567 	 * Don't let the freeze count go negative.
568 	 *
569 	 * Presumably the adapter driver could keep track of this,
570 	 * but it might just be easier to do this here so as to allow
571 	 * multiple callers, including those outside the adapter driver.
572 	 */
573 	if (chan->chan_qfreeze < 0) {
574 		chan->chan_qfreeze = 0;
575 	}
576 	splx(s);
577 	/*
578 	 * Kick the channel's queue here.  Note, we may be running in
579 	 * interrupt context (softclock or HBA's interrupt), so the adapter
580 	 * driver had better not sleep.
581 	 */
582 	if (chan->chan_qfreeze == 0)
583 		scsipi_run_queue(chan);
584 }
585 
586 /*
587  * scsipi_channel_timed_thaw:
588  *
589  *	Thaw a channel after some time has expired. This will also
590  * 	run the channel's queue if the freeze count has reached 0.
591  */
592 void
593 scsipi_channel_timed_thaw(void *arg)
594 {
595 	struct scsipi_channel *chan = arg;
596 
597 	scsipi_channel_thaw(chan, 1);
598 }
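
/*
 * Minimal usage sketch (hypothetical adapter code; sc_callout is an
 * assumed callout in the adapter softc): a driver that needs to back
 * off for a second freezes the channel and arms a callout that fires
 * scsipi_channel_timed_thaw(), which thaws the channel and kicks the
 * queue once the freeze count drops to 0:
 *
 *	scsipi_channel_freeze(chan, 1);
 *	callout_reset(&sc->sc_callout, hz,
 *	    scsipi_channel_timed_thaw, chan);
 */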
599 
600 /*
601  * scsipi_periph_freeze:
602  *
603  *	Freeze a device's xfer queue.
604  */
605 void
606 scsipi_periph_freeze(struct scsipi_periph *periph, int count)
607 {
608 	int s;
609 
610 	s = splbio();
611 	periph->periph_qfreeze += count;
612 	splx(s);
613 }
614 
615 /*
616  * scsipi_periph_thaw:
617  *
618  *	Thaw a device's xfer queue.
619  */
620 void
621 scsipi_periph_thaw(struct scsipi_periph *periph, int count)
622 {
623 	int s;
624 
625 	s = splbio();
626 	periph->periph_qfreeze -= count;
627 #ifdef DIAGNOSTIC
628 	if (periph->periph_qfreeze < 0) {
629 		static const char pc[] = "periph freeze count < 0";
630 		scsipi_printaddr(periph);
631 		printf("%s\n", pc);
632 		panic(pc);
633 	}
634 #endif
635 	if (periph->periph_qfreeze == 0 &&
636 	    (periph->periph_flags & PERIPH_WAITING) != 0)
637 		wakeup(periph);
638 	splx(s);
639 }
640 
641 /*
642  * scsipi_periph_timed_thaw:
643  *
644  *	Thaw a device after some time has expired.
645  */
646 void
647 scsipi_periph_timed_thaw(void *arg)
648 {
649 	int s;
650 	struct scsipi_periph *periph = arg;
651 
652 	callout_stop(&periph->periph_callout);
653 
654 	s = splbio();
655 	scsipi_periph_thaw(periph, 1);
656 	if ((periph->periph_channel->chan_flags & SCSIPI_CHAN_TACTIVE) == 0) {
657 		/*
658 		 * Kick the channel's queue here.  Note, we're running in
659 		 * interrupt context (softclock), so the adapter driver
660 		 * had better not sleep.
661 		 */
662 		scsipi_run_queue(periph->periph_channel);
663 	} else {
664 		/*
665 		 * Tell the completion thread to kick the channel's queue here.
666 		 */
667 		periph->periph_channel->chan_tflags |= SCSIPI_CHANT_KICK;
668 		wakeup(&periph->periph_channel->chan_complete);
669 	}
670 	splx(s);
671 }
672 
673 /*
674  * scsipi_wait_drain:
675  *
676  *	Wait for a periph's pending xfers to drain.
677  */
678 void
679 scsipi_wait_drain(struct scsipi_periph *periph)
680 {
681 	int s;
682 
683 	s = splbio();
684 	while (periph->periph_active != 0) {
685 		periph->periph_flags |= PERIPH_WAITDRAIN;
686 		(void) tsleep(&periph->periph_active, PRIBIO, "sxdrn", 0);
687 	}
688 	splx(s);
689 }
690 
691 /*
692  * scsipi_kill_pending:
693  *
694  *	Kill off all pending xfers for a periph.
695  *
696  *	NOTE: Must be called at splbio().
697  */
698 void
699 scsipi_kill_pending(struct scsipi_periph *periph)
700 {
701 
702 	(*periph->periph_channel->chan_bustype->bustype_kill_pending)(periph);
703 	scsipi_wait_drain(periph);
704 }
705 
706 /*
707  * scsipi_print_cdb:
708  * prints a command descriptor block (for debug purpose, error messages,
709  * prints a command descriptor block (for debugging purposes, error
710  * messages, SCSIPI_VERBOSE, ...)
711 void
712 scsipi_print_cdb(struct scsipi_generic *cmd)
713 {
714 	int i, j;
715 
716  	printf("0x%02x", cmd->opcode);
717 
718  	switch (CDB_GROUPID(cmd->opcode)) {
719  	case CDB_GROUPID_0:
720  		j = CDB_GROUP0;
721  		break;
722  	case CDB_GROUPID_1:
723  		j = CDB_GROUP1;
724  		break;
725  	case CDB_GROUPID_2:
726  		j = CDB_GROUP2;
727  		break;
728  	case CDB_GROUPID_3:
729  		j = CDB_GROUP3;
730  		break;
731  	case CDB_GROUPID_4:
732  		j = CDB_GROUP4;
733  		break;
734  	case CDB_GROUPID_5:
735  		j = CDB_GROUP5;
736  		break;
737  	case CDB_GROUPID_6:
738  		j = CDB_GROUP6;
739  		break;
740  	case CDB_GROUPID_7:
741  		j = CDB_GROUP7;
742  		break;
743  	default:
744  		j = 0;
745  	}
746  	if (j == 0)
747  		j = sizeof (cmd->bytes);
748  	for (i = 0; i < j-1; i++) /* already done the opcode */
749  		printf(" %02x", cmd->bytes[i]);
750 }
751 
752 /*
753  * scsipi_interpret_sense:
754  *
755  *	Look at the returned sense and act on the error, determining
756  *	the unix error number to pass back.  (0 = report no error)
757  *
758  *	NOTE: If we return ERESTART, we are expected to have
759  *	thawed the device!
760  *
761  *	THIS IS THE DEFAULT ERROR HANDLER FOR SCSI DEVICES.
762  */
763 int
764 scsipi_interpret_sense(struct scsipi_xfer *xs)
765 {
766 	struct scsipi_sense_data *sense;
767 	struct scsipi_periph *periph = xs->xs_periph;
768 	u_int8_t key;
769 	int error;
770 #ifndef	SCSIVERBOSE
771 	u_int32_t info;
772 	static char *error_mes[] = {
773 		"soft error (corrected)",
774 		"not ready", "medium error",
775 		"non-media hardware failure", "illegal request",
776 		"unit attention", "readonly device",
777 		"no data found", "vendor unique",
778 		"copy aborted", "command aborted",
779 		"search returned equal", "volume overflow",
780 		"verify miscompare", "unknown error key"
781 	};
782 #endif
783 
784 	sense = &xs->sense.scsi_sense;
785 #ifdef SCSIPI_DEBUG
786 	if (periph->periph_flags & SCSIPI_DB1) {
787 		int count;
788 		scsipi_printaddr(periph);
789 		printf(" sense debug information:\n");
790 		printf("\tcode 0x%x valid 0x%x\n",
791 			sense->error_code & SSD_ERRCODE,
792 			sense->error_code & SSD_ERRCODE_VALID ? 1 : 0);
793 		printf("\tseg 0x%x key 0x%x ili 0x%x eom 0x%x fmark 0x%x\n",
794 			sense->segment,
795 			sense->flags & SSD_KEY,
796 			sense->flags & SSD_ILI ? 1 : 0,
797 			sense->flags & SSD_EOM ? 1 : 0,
798 			sense->flags & SSD_FILEMARK ? 1 : 0);
799 		printf("\ninfo: 0x%x 0x%x 0x%x 0x%x followed by %d "
800 			"extra bytes\n",
801 			sense->info[0],
802 			sense->info[1],
803 			sense->info[2],
804 			sense->info[3],
805 			sense->extra_len);
806 		printf("\textra: ");
807 		for (count = 0; count < ADD_BYTES_LIM(sense); count++)
808 			printf("0x%x ", sense->cmd_spec_info[count]);
809 		printf("\n");
810 	}
811 #endif
812 
813 	/*
814 	 * If the periph has its own error handler, call it first.
815 	 * If it returns a legit error value, return that, otherwise
816 	 * it wants us to continue with normal error processing.
817 	 */
818 	if (periph->periph_switch->psw_error != NULL) {
819 		SC_DEBUG(periph, SCSIPI_DB2,
820 		    ("calling private err_handler()\n"));
821 		error = (*periph->periph_switch->psw_error)(xs);
822 		if (error != EJUSTRETURN)
823 			return (error);
824 	}
825 	/* otherwise use the default */
826 	switch (sense->error_code & SSD_ERRCODE) {
827 
828 		/*
829 		 * Old SCSI-1 and SASI devices respond with
830 		 * codes other than 70.
831 		 */
832 	case 0x00:		/* no error (command completed OK) */
833 		return (0);
834 	case 0x04:		/* drive not ready after it was selected */
835 		if ((periph->periph_flags & PERIPH_REMOVABLE) != 0)
836 			periph->periph_flags &= ~PERIPH_MEDIA_LOADED;
837 		if ((xs->xs_control & XS_CTL_IGNORE_NOT_READY) != 0)
838 			return (0);
839 		/* XXX - display some sort of error here? */
840 		return (EIO);
841 	case 0x20:		/* invalid command */
842 		if ((xs->xs_control &
843 		     XS_CTL_IGNORE_ILLEGAL_REQUEST) != 0)
844 			return (0);
845 		return (EINVAL);
846 	case 0x25:		/* invalid LUN (Adaptec ACB-4000) */
847 		return (EACCES);
848 
849 		/*
850 		 * If it's code 70, use the extended stuff and
851 		 * interpret the key
852 		 */
853 	case 0x71:		/* delayed error */
854 		scsipi_printaddr(periph);
855 		key = sense->flags & SSD_KEY;
856 		printf(" DEFERRED ERROR, key = 0x%x\n", key);
857 		/* FALLTHROUGH */
858 	case 0x70:
859 #ifndef	SCSIVERBOSE
860 		if ((sense->error_code & SSD_ERRCODE_VALID) != 0)
861 			info = _4btol(sense->info);
862 		else
863 			info = 0;
864 #endif
865 		key = sense->flags & SSD_KEY;
866 
867 		switch (key) {
868 		case SKEY_NO_SENSE:
869 		case SKEY_RECOVERED_ERROR:
870 			if (xs->resid == xs->datalen && xs->datalen) {
871 				/*
872 				 * Why is this here?
873 				 */
874 				xs->resid = 0;	/* not short read */
875 			}
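			/* FALLTHROUGH */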
876 		case SKEY_EQUAL:
877 			error = 0;
878 			break;
879 		case SKEY_NOT_READY:
880 			if ((periph->periph_flags & PERIPH_REMOVABLE) != 0)
881 				periph->periph_flags &= ~PERIPH_MEDIA_LOADED;
882 			if ((xs->xs_control & XS_CTL_IGNORE_NOT_READY) != 0)
883 				return (0);
884 			if (sense->add_sense_code == 0x3A) {
885 				error = ENODEV; /* Medium not present */
886 				if (xs->xs_control & XS_CTL_SILENT_NODEV)
887 					return (error);
888 			} else
889 				error = EIO;
890 			if ((xs->xs_control & XS_CTL_SILENT) != 0)
891 				return (error);
892 			break;
893 		case SKEY_ILLEGAL_REQUEST:
894 			if ((xs->xs_control &
895 			     XS_CTL_IGNORE_ILLEGAL_REQUEST) != 0)
896 				return (0);
897 			/*
898 			 * Handle the case where a device reports
899 			 * Logical Unit Not Supported during discovery.
900 			 */
901 			if ((xs->xs_control & XS_CTL_DISCOVERY) != 0 &&
902 			    sense->add_sense_code == 0x25 &&
903 			    sense->add_sense_code_qual == 0x00)
904 				return (EINVAL);
905 			if ((xs->xs_control & XS_CTL_SILENT) != 0)
906 				return (EIO);
907 			error = EINVAL;
908 			break;
909 		case SKEY_UNIT_ATTENTION:
910 			if (sense->add_sense_code == 0x29 &&
911 			    sense->add_sense_code_qual == 0x00) {
912 				/* device or bus reset */
913 				return (ERESTART);
914 			}
915 			if ((periph->periph_flags & PERIPH_REMOVABLE) != 0)
916 				periph->periph_flags &= ~PERIPH_MEDIA_LOADED;
917 			if ((xs->xs_control &
918 			     XS_CTL_IGNORE_MEDIA_CHANGE) != 0 ||
919 				/* XXX Should reupload any transient state. */
920 				(periph->periph_flags &
921 				 PERIPH_REMOVABLE) == 0) {
922 				return (ERESTART);
923 			}
924 			if ((xs->xs_control & XS_CTL_SILENT) != 0)
925 				return (EIO);
926 			error = EIO;
927 			break;
928 		case SKEY_WRITE_PROTECT:
929 			error = EROFS;
930 			break;
931 		case SKEY_BLANK_CHECK:
932 			error = 0;
933 			break;
934 		case SKEY_ABORTED_COMMAND:
935 			if (xs->xs_retries != 0) {
936 				xs->xs_retries--;
937 				error = ERESTART;
938 			} else
939 				error = EIO;
940 			break;
941 		case SKEY_VOLUME_OVERFLOW:
942 			error = ENOSPC;
943 			break;
944 		default:
945 			error = EIO;
946 			break;
947 		}
948 
949 #ifdef SCSIVERBOSE
950 		if (key && (xs->xs_control & XS_CTL_SILENT) == 0)
951 			scsipi_print_sense(xs, 0);
952 #else
953 		if (key) {
954 			scsipi_printaddr(periph);
955 			printf("%s", error_mes[key - 1]);
956 			if ((sense->error_code & SSD_ERRCODE_VALID) != 0) {
957 				switch (key) {
958 				case SKEY_NOT_READY:
959 				case SKEY_ILLEGAL_REQUEST:
960 				case SKEY_UNIT_ATTENTION:
961 				case SKEY_WRITE_PROTECT:
962 					break;
963 				case SKEY_BLANK_CHECK:
964 					printf(", requested size: %d (decimal)",
965 					    info);
966 					break;
967 				case SKEY_ABORTED_COMMAND:
968 					if (xs->xs_retries)
969 						printf(", retrying");
970 					printf(", cmd 0x%x, info 0x%x",
971 					    xs->cmd->opcode, info);
972 					break;
973 				default:
974 					printf(", info = %d (decimal)", info);
975 				}
976 			}
977 			if (sense->extra_len != 0) {
978 				int n;
979 				printf(", data =");
980 				for (n = 0; n < sense->extra_len; n++)
981 					printf(" %02x",
982 					    sense->cmd_spec_info[n]);
983 			}
984 			printf("\n");
985 		}
986 #endif
987 		return (error);
988 
989 	/*
990 	 * Some other code, just report it
991 	 */
992 	default:
993 #if    defined(SCSIDEBUG) || defined(DEBUG)
994 	{
995 		static char *uc = "undecodable sense error";
996 		int i;
997 		u_int8_t *cptr = (u_int8_t *) sense;
998 		scsipi_printaddr(periph);
999 		if (xs->cmd == &xs->cmdstore) {
1000 			printf("%s for opcode 0x%x, data=",
1001 			    uc, xs->cmdstore.opcode);
1002 		} else {
1003 			printf("%s, data=", uc);
1004 		}
1005 		for (i = 0; i < sizeof (*sense); i++)
1006 			printf(" 0x%02x", *(cptr++) & 0xff);
1007 		printf("\n");
1008 	}
1009 #else
1010 		scsipi_printaddr(periph);
1011 		printf("Sense Error Code 0x%x",
1012 			sense->error_code & SSD_ERRCODE);
1013 		if ((sense->error_code & SSD_ERRCODE_VALID) != 0) {
1014 			struct scsipi_sense_data_unextended *usense =
1015 			    (struct scsipi_sense_data_unextended *)sense;
1016 			printf(" at block no. %d (decimal)",
1017 			    _3btol(usense->block));
1018 		}
1019 		printf("\n");
1020 #endif
1021 		return (EIO);
1022 	}
1023 }
1024 
1025 /*
1026  * scsipi_size:
1027  *
1028  *	Find out from the device what its capacity is.
1029  */
1030 u_int64_t
1031 scsipi_size(struct scsipi_periph *periph, int flags)
1032 {
1033 	struct scsipi_read_cap_data rdcap;
1034 	struct scsipi_read_capacity scsipi_cmd;
1035 
1036 	memset(&scsipi_cmd, 0, sizeof(scsipi_cmd));
1037 	scsipi_cmd.opcode = READ_CAPACITY;
1038 
1039 	/*
1040 	 * If the command works, interpret the result as a 4 byte
1041 	 * number of blocks
1042 	 */
1043 	if (scsipi_command(periph, NULL, (struct scsipi_generic *)&scsipi_cmd,
1044 	    sizeof(scsipi_cmd), (u_char *)&rdcap, sizeof(rdcap),
1045 	    SCSIPIRETRIES, 20000, NULL,
1046 	    flags | XS_CTL_DATA_IN | XS_CTL_DATA_ONSTACK | XS_CTL_SILENT) != 0)
1047 		return (0);
1048 
1049 	return (_4btol(rdcap.addr) + 1);
1050 }
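
/*
 * Worked example: READ CAPACITY reports the address of the *last*
 * block, hence the `+ 1' above.  A device answering 0x003fffff has
 * 0x00400000 blocks; with 512-byte blocks that is 2GB.  The block
 * length itself must be obtained by the caller (e.g. from the same
 * READ CAPACITY data).
 */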
1051 
1052 /*
1053  * scsipi_test_unit_ready:
1054  *
1055  *	Issue a `test unit ready' request.
1056  */
1057 int
1058 scsipi_test_unit_ready(struct scsipi_periph *periph, int flags)
1059 {
1060 	int retries;
1061 	struct scsipi_test_unit_ready scsipi_cmd;
1062 
1063 	/* some ATAPI drives don't support TEST_UNIT_READY. Sigh */
1064 	if (periph->periph_quirks & PQUIRK_NOTUR)
1065 		return (0);
1066 
1067 	memset(&scsipi_cmd, 0, sizeof(scsipi_cmd));
1068 	scsipi_cmd.opcode = TEST_UNIT_READY;
1069 
1070 	if (flags & XS_CTL_DISCOVERY)
1071 		retries = 0;
1072 	else
1073 		retries = SCSIPIRETRIES;
1074 
1075 	return (scsipi_command(periph, NULL,
1076 	    (struct scsipi_generic *)&scsipi_cmd, sizeof(scsipi_cmd),
1077 	    0, 0, retries, 10000, NULL, flags));
1078 }
1079 
1080 /*
1081  * scsipi_inquire:
1082  *
1083  *	Ask the device about itself.
1084  */
1085 int
1086 scsipi_inquire(struct scsipi_periph *periph, struct scsipi_inquiry_data *inqbuf,
1087     int flags)
1088 {
1089 	int retries;
1090 	struct scsipi_inquiry scsipi_cmd;
1091 	int error;
1092 
1093 	memset(&scsipi_cmd, 0, sizeof(scsipi_cmd));
1094 	scsipi_cmd.opcode = INQUIRY;
1095 
1096 	if (flags & XS_CTL_DISCOVERY)
1097 		retries = 0;
1098 	else
1099 		retries = SCSIPIRETRIES;
1100 
1101 	/*
1102 	 * If we request more data than the device can provide, it SHOULD just
1103 	 * return a short response.  However, some devices error with an
1104 	 * ILLEGAL REQUEST sense code, and yet others have even more special
1105 	 * failure modes (such as the GL641USB flash adapter, which goes loony
1106 	 * and sends corrupted CRCs).  To work around this, and to bring our
1107 	 * behavior more in line with other OSes, we do a shorter inquiry,
1108 	 * covering all the SCSI-2 information, first, and then request more
1109 	 * data iff the "additional length" field indicates there is more.
1110 	 * - mycroft, 2003/10/16
1111 	 */
1112 	scsipi_cmd.length = SCSIPI_INQUIRY_LENGTH_SCSI2;
1113 	error = scsipi_command(periph, NULL,
1114 	    (struct scsipi_generic *) &scsipi_cmd, sizeof(scsipi_cmd),
1115 	    (u_char *) inqbuf, SCSIPI_INQUIRY_LENGTH_SCSI2,
1116 	    retries, 10000, NULL, XS_CTL_DATA_IN | flags);
1117 	if (!error && inqbuf->additional_length > SCSIPI_INQUIRY_LENGTH_SCSI2 - 4) {
1118 		scsipi_cmd.length = SCSIPI_INQUIRY_LENGTH_SCSI3;
1119 		error = scsipi_command(periph, NULL,
1120 		    (struct scsipi_generic *) &scsipi_cmd, sizeof(scsipi_cmd),
1121 		    (u_char *) inqbuf, SCSIPI_INQUIRY_LENGTH_SCSI3,
1122 		    retries, 10000, NULL, XS_CTL_DATA_IN | flags);
1123 	}
1124 
1125 #ifdef SCSI_OLD_NOINQUIRY
1126 	/*
1127 	 * Kludge for the Adaptec ACB-4000 SCSI->MFM translator.
1128 	 * This board doesn't support the INQUIRY command at all.
1129 	 */
1130 	if (error == EINVAL || error == EACCES) {
1131 		/*
1132 		 * Conjure up an INQUIRY response.
1133 		 */
1134 		inqbuf->device = (error == EINVAL ?
1135 			 SID_QUAL_LU_PRESENT :
1136 			 SID_QUAL_LU_NOTPRESENT) | T_DIRECT;
1137 		inqbuf->dev_qual2 = 0;
1138 		inqbuf->version = 0;
1139 		inqbuf->response_format = SID_FORMAT_SCSI1;
1140 		inqbuf->additional_length = SCSIPI_INQUIRY_LENGTH_SCSI2 - 4;
1141 		inqbuf->flags1 = inqbuf->flags2 = inqbuf->flags3 = 0;
1142 		memcpy(inqbuf->vendor, "ADAPTEC ACB-4000            ", 28);
1143 		error = 0;
1144 	}
1145 
1146 	/*
1147 	 * Kludge for the Emulex MT-02 SCSI->QIC translator.
1148 	 * This board gives an empty response to an INQUIRY command.
1149 	 */
1150 	else if (error == 0 &&
1151 		 inqbuf->device == (SID_QUAL_LU_PRESENT | T_DIRECT) &&
1152 		 inqbuf->dev_qual2 == 0 &&
1153 		 inqbuf->version == 0 &&
1154 		 inqbuf->response_format == SID_FORMAT_SCSI1) {
1155 		/*
1156 		 * Fill out the INQUIRY response.
1157 		 */
1158 		inqbuf->device = (SID_QUAL_LU_PRESENT | T_SEQUENTIAL);
1159 		inqbuf->dev_qual2 = SID_REMOVABLE;
1160 		inqbuf->additional_length = SCSIPI_INQUIRY_LENGTH_SCSI2 - 4;
1161 		inqbuf->flags1 = inqbuf->flags2 = inqbuf->flags3 = 0;
1162 		memcpy(inqbuf->vendor, "EMULEX  MT-02 QIC           ", 28);
1163 	}
1164 #endif /* SCSI_OLD_NOINQUIRY */
1165 
1166 	return error;
1167 }
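
/*
 * Hedged caller sketch (hypothetical, for illustration only): a short
 * INQUIRY response leaves the tail of the buffer untouched, so probe
 * code typically zeroes the buffer before asking:
 *
 *	struct scsipi_inquiry_data inqbuf;
 *
 *	memset(&inqbuf, 0, sizeof(inqbuf));
 *	if (scsipi_inquire(periph, &inqbuf, XS_CTL_DISCOVERY) == 0)
 *		printf("SCSI version byte 0x%02x\n", inqbuf.version);
 */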
1168 
1169 /*
1170  * scsipi_prevent:
1171  *
1172  *	Prevent or allow the user to remove the media
1173  */
1174 int
1175 scsipi_prevent(struct scsipi_periph *periph, int type, int flags)
1176 {
1177 	struct scsipi_prevent scsipi_cmd;
1178 
1179 	memset(&scsipi_cmd, 0, sizeof(scsipi_cmd));
1180 	scsipi_cmd.opcode = PREVENT_ALLOW;
1181 	scsipi_cmd.how = type;
1182 
1183 	return (scsipi_command(periph, NULL,
1184 	    (struct scsipi_generic *) &scsipi_cmd, sizeof(scsipi_cmd),
1185 	    0, 0, SCSIPIRETRIES, 5000, NULL, flags));
1186 }
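
/*
 * Usage sketch (illustrative; mirrors what a disk driver typically
 * does): lock the door while the device is open, ignoring errors from
 * devices that don't implement the command:
 *
 *	(void) scsipi_prevent(periph, PR_PREVENT,
 *	    XS_CTL_IGNORE_ILLEGAL_REQUEST | XS_CTL_IGNORE_MEDIA_CHANGE);
 */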
1187 
1188 /*
1189  * scsipi_start:
1190  *
1191  *	Send a START UNIT.
1192  */
1193 int
1194 scsipi_start(struct scsipi_periph *periph, int type, int flags)
1195 {
1196 	struct scsipi_start_stop scsipi_cmd;
1197 
1198 	memset(&scsipi_cmd, 0, sizeof(scsipi_cmd));
1199 	scsipi_cmd.opcode = START_STOP;
1200 	scsipi_cmd.byte2 = 0x00;
1201 	scsipi_cmd.how = type;
1202 
1203 	return (scsipi_command(periph, NULL,
1204 	    (struct scsipi_generic *) &scsipi_cmd, sizeof(scsipi_cmd),
1205 	    0, 0, SCSIPIRETRIES, (type & SSS_START) ? 60000 : 10000,
1206 	    NULL, flags));
1207 }
1208 
1209 /*
1210  * scsipi_mode_sense, scsipi_mode_sense_big:
1211  *	get a sense page from a device
1212  */
1213 
1214 int
1215 scsipi_mode_sense(struct scsipi_periph *periph, int byte2, int page,
1216     struct scsipi_mode_header *data, int len, int flags, int retries,
1217     int timeout)
1218 {
1219 	struct scsipi_mode_sense scsipi_cmd;
1220 	int error;
1221 
1222 	memset(&scsipi_cmd, 0, sizeof(scsipi_cmd));
1223 	scsipi_cmd.opcode = MODE_SENSE;
1224 	scsipi_cmd.byte2 = byte2;
1225 	scsipi_cmd.page = page;
1226 	scsipi_cmd.length = len & 0xff;
1227 	error = scsipi_command(periph, NULL,
1228 	    (struct scsipi_generic *)&scsipi_cmd, sizeof(scsipi_cmd),
1229 	    (void *)data, len, retries, timeout, NULL,
1230 	    flags | XS_CTL_DATA_IN);
1231 	SC_DEBUG(periph, SCSIPI_DB2,
1232 	    ("scsipi_mode_sense: error=%d\n", error));
1233 	return (error);
1234 }
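
/*
 * Minimal usage sketch (hypothetical caller; the wrapper struct is an
 * assumption for exposition): fetching the caching page (0x08) might
 * look roughly like:
 *
 *	struct {
 *		struct scsipi_mode_header header;
 *		u_int8_t page[20];
 *	} data;
 *
 *	error = scsipi_mode_sense(periph, 0, 0x08, &data.header,
 *	    sizeof(data), flags, SCSIPIRETRIES, 10000);
 *
 * scsipi_mode_sense_big() below is the 10-byte-CDB variant for
 * transfer lengths that do not fit the 8-bit length field.
 */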
1235 
1236 int
1237 scsipi_mode_sense_big(struct scsipi_periph *periph, int byte2, int page,
1238     struct scsipi_mode_header_big *data, int len, int flags, int retries,
1239     int timeout)
1240 {
1241 	struct scsipi_mode_sense_big scsipi_cmd;
1242 	int error;
1243 
1244 	memset(&scsipi_cmd, 0, sizeof(scsipi_cmd));
1245 	scsipi_cmd.opcode = MODE_SENSE_BIG;
1246 	scsipi_cmd.byte2 = byte2;
1247 	scsipi_cmd.page = page;
1248 	_lto2b(len, scsipi_cmd.length);
1249 	error = scsipi_command(periph, NULL,
1250 	    (struct scsipi_generic *)&scsipi_cmd, sizeof(scsipi_cmd),
1251 	    (void *)data, len, retries, timeout, NULL,
1252 	    flags | XS_CTL_DATA_IN);
1253 	SC_DEBUG(periph, SCSIPI_DB2,
1254 	    ("scsipi_mode_sense_big: error=%d\n", error));
1255 	return (error);
1256 }
1257 
1258 int
1259 scsipi_mode_select(struct scsipi_periph *periph, int byte2,
1260     struct scsipi_mode_header *data, int len, int flags, int retries,
1261     int timeout)
1262 {
1263 	struct scsipi_mode_select scsipi_cmd;
1264 	int error;
1265 
1266 	memset(&scsipi_cmd, 0, sizeof(scsipi_cmd));
1267 	scsipi_cmd.opcode = MODE_SELECT;
1268 	scsipi_cmd.byte2 = byte2;
1269 	scsipi_cmd.length = len & 0xff;
1270 	error = scsipi_command(periph, NULL,
1271 	    (struct scsipi_generic *)&scsipi_cmd, sizeof(scsipi_cmd),
1272 	    (void *)data, len, retries, timeout, NULL,
1273 	    flags | XS_CTL_DATA_OUT);
1274 	SC_DEBUG(periph, SCSIPI_DB2,
1275 	    ("scsipi_mode_select: error=%d\n", error));
1276 	return (error);
1277 }
1278 
1279 int
1280 scsipi_mode_select_big(struct scsipi_periph *periph, int byte2,
1281     struct scsipi_mode_header_big *data, int len, int flags, int retries,
1282     int timeout)
1283 {
1284 	struct scsipi_mode_select_big scsipi_cmd;
1285 	int error;
1286 
1287 	memset(&scsipi_cmd, 0, sizeof(scsipi_cmd));
1288 	scsipi_cmd.opcode = MODE_SELECT_BIG;
1289 	scsipi_cmd.byte2 = byte2;
1290 	_lto2b(len, scsipi_cmd.length);
1291 	error = scsipi_command(periph, NULL,
1292 	    (struct scsipi_generic *)&scsipi_cmd, sizeof(scsipi_cmd),
1293 	    (void *)data, len, retries, timeout, NULL,
1294 	    flags | XS_CTL_DATA_OUT);
1295 	SC_DEBUG(periph, SCSIPI_DB2,
1296 	    ("scsipi_mode_select: error=%d\n", error));
1297 	return (error);
1298 }
1299 
1300 /*
1301  * scsipi_done:
1302  *
1303  *	This routine is called by an adapter's interrupt handler when
1304  *	an xfer is completed.
1305  */
1306 void
1307 scsipi_done(struct scsipi_xfer *xs)
1308 {
1309 	struct scsipi_periph *periph = xs->xs_periph;
1310 	struct scsipi_channel *chan = periph->periph_channel;
1311 	int s, freezecnt;
1312 
1313 	SC_DEBUG(periph, SCSIPI_DB2, ("scsipi_done\n"));
1314 #ifdef SCSIPI_DEBUG
1315 	if (periph->periph_dbflags & SCSIPI_DB1)
1316 		show_scsipi_cmd(xs);
1317 #endif
1318 
1319 	s = splbio();
1320 	/*
1321 	 * The resource this command was using is now free.
1322 	 */
1323 	scsipi_put_resource(chan);
1324 	xs->xs_periph->periph_sent--;
1325 
1326 	/*
1327 	 * If the command was tagged, free the tag.
1328 	 */
1329 	if (XS_CTL_TAGTYPE(xs) != 0)
1330 		scsipi_put_tag(xs);
1331 	else
1332 		periph->periph_flags &= ~PERIPH_UNTAG;
1333 
1334 	/* Mark the command as `done'. */
1335 	xs->xs_status |= XS_STS_DONE;
1336 
1337 #ifdef DIAGNOSTIC
1338 	if ((xs->xs_control & (XS_CTL_ASYNC|XS_CTL_POLL)) ==
1339 	    (XS_CTL_ASYNC|XS_CTL_POLL))
1340 		panic("scsipi_done: ASYNC and POLL");
1341 #endif
1342 
1343 	/*
1344 	 * If the xfer had an error of any sort, freeze the
1345 	 * periph's queue.  Freeze it again if we were requested
1346 	 * to do so in the xfer.
1347 	 */
1348 	freezecnt = 0;
1349 	if (xs->error != XS_NOERROR)
1350 		freezecnt++;
1351 	if (xs->xs_control & XS_CTL_FREEZE_PERIPH)
1352 		freezecnt++;
1353 	if (freezecnt != 0)
1354 		scsipi_periph_freeze(periph, freezecnt);
1355 
1356 	/*
1357 	 * Record the xfer with a pending sense, in case a SCSI reset is
1358 	 * received before the thread is woken up.
1359 	 */
1360 	if (xs->error == XS_BUSY && xs->status == SCSI_CHECK) {
1361 		periph->periph_flags |= PERIPH_SENSE;
1362 		periph->periph_xscheck = xs;
1363 	}
1364 
1365 	/*
1366 	 * If this was an xfer that was not to complete asynchronously,
1367 	 * let the requesting thread perform error checking/handling
1368 	 * in its context.
1369 	 */
1370 	if ((xs->xs_control & XS_CTL_ASYNC) == 0) {
1371 		splx(s);
1372 		/*
1373 		 * If it's a polling job, just return, to unwind the
1374 		 * call graph.  We don't need to restart the queue,
1375 		 * because polling jobs are treated specially, and
1376 		 * are really only used during crash dumps anyway
1377 		 * (XXX or during boot-time autoconfiguration of
1378 		 * ATAPI devices).
1379 		 */
1380 		if (xs->xs_control & XS_CTL_POLL)
1381 			return;
1382 		wakeup(xs);
1383 		goto out;
1384 	}
1385 
1386 	/*
1387 	 * Catch the extremely common case of I/O completing
1388 	 * without error; no use in taking a context switch
1389 	 * if we can handle it in interrupt context.
1390 	 */
1391 	if (xs->error == XS_NOERROR) {
1392 		splx(s);
1393 		(void) scsipi_complete(xs);
1394 		goto out;
1395 	}
1396 
1397 	/*
1398 	 * There is an error on this xfer.  Put it on the channel's
1399 	 * completion queue, and wake up the completion thread.
1400 	 */
1401 	TAILQ_INSERT_TAIL(&chan->chan_complete, xs, channel_q);
1402 	splx(s);
1403 	wakeup(&chan->chan_complete);
1404 
1405  out:
1406 	/*
1407 	 * If there are more xfers on the channel's queue, attempt to
1408 	 * run them.
1409 	 */
1410 	scsipi_run_queue(chan);
1411 }
1412 
1413 /*
1414  * scsipi_complete:
1415  *
1416  *	Completion of a scsipi_xfer.  This is the guts of scsipi_done().
1417  *
1418  *	NOTE: This routine MUST be called with valid thread context
1419  *	except for the case where the following two conditions are
1420  *	true:
1421  *
1422  *		xs->error == XS_NOERROR
1423  *		XS_CTL_ASYNC is set in xs->xs_control
1424  *
1425  *	The semantics of this routine can be tricky, so here is an
1426  *	explanation:
1427  *
1428  *		0		Xfer completed successfully.
1429  *
1430  *		ERESTART	Xfer had an error, but was restarted.
1431  *
1432  *		anything else	Xfer had an error, return value is Unix
1433  *				errno.
1434  *
1435  *	If the return value is anything but ERESTART:
1436  *
1437  *		- If XS_CTL_ASYNC is set, `xs' has been freed back to
1438  *		  the pool.
1439  *		- If there is a buf associated with the xfer,
1440  *		  it has been biodone()'d.
1441  */
1442 static int
1443 scsipi_complete(struct scsipi_xfer *xs)
1444 {
1445 	struct scsipi_periph *periph = xs->xs_periph;
1446 	struct scsipi_channel *chan = periph->periph_channel;
1447 	struct buf *bp;
1448 	int error, s;
1449 
1450 #ifdef DIAGNOSTIC
1451 	if ((xs->xs_control & XS_CTL_ASYNC) != 0 && xs->bp == NULL)
1452 		panic("scsipi_complete: XS_CTL_ASYNC but no buf");
1453 #endif
1454 	/*
1455 	 * If command terminated with a CHECK CONDITION, we need to issue a
1456 	 * REQUEST_SENSE command. Once the REQUEST_SENSE has been processed
1457 	 * we'll have the real status.
1458 	 * Must be processed at splbio() to avoid missing a SCSI bus reset
1459 	 * for this command.
1460 	 */
1461 	s = splbio();
1462 	if (xs->error == XS_BUSY && xs->status == SCSI_CHECK) {
1463 		/* request sense for a request sense? */
1464 		if (xs->xs_control & XS_CTL_REQSENSE) {
1465 			scsipi_printaddr(periph);
1466 			printf("request sense for a request sense?\n");
1467 			/* XXX maybe we should reset the device? */
1468 			/* we've been frozen because xs->error != XS_NOERROR */
1469 			scsipi_periph_thaw(periph, 1);
1470 			splx(s);
1471 			if (xs->resid < xs->datalen) {
1472 				printf("we read %d bytes of sense anyway:\n",
1473 				    xs->datalen - xs->resid);
1474 #ifdef SCSIVERBOSE
1475 				scsipi_print_sense_data((void *)xs->data, 0);
1476 #endif
1477 			}
1478 			return EINVAL;
1479 		}
1480 		scsipi_request_sense(xs);
1481 	}
1482 	splx(s);
1483 
1484 	/*
1485 	 * If it's a user level request, bypass all usual completion
1486 	 * processing and let the user work it out.
1487 	 */
1488 	if ((xs->xs_control & XS_CTL_USERCMD) != 0) {
1489 		SC_DEBUG(periph, SCSIPI_DB3, ("calling user done()\n"));
1490 		if (xs->error != XS_NOERROR)
1491 			scsipi_periph_thaw(periph, 1);
1492 		scsipi_user_done(xs);
1493 		SC_DEBUG(periph, SCSIPI_DB3, ("returned from user done()\n "));
1494 		return 0;
1495 	}
1496 
1497 	switch (xs->error) {
1498 	case XS_NOERROR:
1499 		error = 0;
1500 		break;
1501 
1502 	case XS_SENSE:
1503 	case XS_SHORTSENSE:
1504 		error = (*chan->chan_bustype->bustype_interpret_sense)(xs);
1505 		break;
1506 
1507 	case XS_RESOURCE_SHORTAGE:
1508 		/*
1509 		 * XXX Should freeze channel's queue.
1510 		 */
1511 		scsipi_printaddr(periph);
1512 		printf("adapter resource shortage\n");
1513 		/* FALLTHROUGH */
1514 
1515 	case XS_BUSY:
1516 		if (xs->error == XS_BUSY && xs->status == SCSI_QUEUE_FULL) {
1517 			struct scsipi_max_openings mo;
1518 
1519 			/*
1520 			 * We set the openings to active - 1, assuming that
1521 			 * the command that got us here is the first one that
1522 			 * can't fit into the device's queue.  If that's not
1523 			 * the case, I guess we'll find out soon enough.
1524 			 */
1525 			mo.mo_target = periph->periph_target;
1526 			mo.mo_lun = periph->periph_lun;
1527 			if (periph->periph_active < periph->periph_openings)
1528 				mo.mo_openings = periph->periph_active - 1;
1529 			else
1530 				mo.mo_openings = periph->periph_openings - 1;
1531 #ifdef DIAGNOSTIC
1532 			if (mo.mo_openings < 0) {
1533 				scsipi_printaddr(periph);
1534 				printf("QUEUE FULL resulted in < 0 openings\n");
1535 				panic("scsipi_done");
1536 			}
1537 #endif
1538 			if (mo.mo_openings == 0) {
1539 				scsipi_printaddr(periph);
1540 				printf("QUEUE FULL resulted in 0 openings\n");
1541 				mo.mo_openings = 1;
1542 			}
1543 			scsipi_async_event(chan, ASYNC_EVENT_MAX_OPENINGS, &mo);
1544 			error = ERESTART;
1545 		} else if (xs->xs_retries != 0) {
1546 			xs->xs_retries--;
1547 			/*
1548 			 * Wait one second, and try again.
1549 			 */
1550 			if ((xs->xs_control & XS_CTL_POLL) ||
1551 			    (chan->chan_flags & SCSIPI_CHAN_TACTIVE) == 0) {
1552 				delay(1000000);
1553 			} else if (!callout_pending(&periph->periph_callout)) {
1554 				scsipi_periph_freeze(periph, 1);
1555 				callout_reset(&periph->periph_callout,
1556 				    hz, scsipi_periph_timed_thaw, periph);
1557 			}
1558 			error = ERESTART;
1559 		} else
1560 			error = EBUSY;
1561 		break;
1562 
1563 	case XS_REQUEUE:
1564 		error = ERESTART;
1565 		break;
1566 
1567 	case XS_SELTIMEOUT:
1568 	case XS_TIMEOUT:
1569 		/*
1570 		 * If the device hasn't gone away, honor retry counts.
1571 		 *
1572 		 * Note that if we're in the middle of probing it,
1573 		 * it won't be found because it isn't here yet so
1574 		 * we won't honor the retry count in that case.
1575 		 */
1576 		if (scsipi_lookup_periph(chan, periph->periph_target,
1577 		    periph->periph_lun) && xs->xs_retries != 0) {
1578 			xs->xs_retries--;
1579 			error = ERESTART;
1580 		} else
1581 			error = EIO;
1582 		break;
1583 
1584 	case XS_RESET:
1585 		if (xs->xs_control & XS_CTL_REQSENSE) {
1586 			/*
1587 			 * request sense interrupted by reset: signal it
1588 			 * with EINTR return code.
1589 			 */
1590 			error = EINTR;
1591 		} else {
1592 			if (xs->xs_retries != 0) {
1593 				xs->xs_retries--;
1594 				error = ERESTART;
1595 			} else
1596 				error = EIO;
1597 		}
1598 		break;
1599 
1600 	case XS_DRIVER_STUFFUP:
1601 		scsipi_printaddr(periph);
1602 		printf("generic HBA error\n");
1603 		error = EIO;
1604 		break;
1605 	default:
1606 		scsipi_printaddr(periph);
1607 		printf("invalid return code from adapter: %d\n", xs->error);
1608 		error = EIO;
1609 		break;
1610 	}
1611 
1612 	s = splbio();
1613 	if (error == ERESTART) {
1614 		/*
1615 		 * If we get here, the periph has been thawed and frozen
1616 		 * again if we had to issue recovery commands.  Alternatively,
1617 		 * it may have been frozen again and in a timed thaw.  In
1618 		 * any case, we thaw the periph once we re-enqueue the
1619 		 * command.  Once the periph is fully thawed, it will begin
1620 		 * operation again.
1621 		 */
1622 		xs->error = XS_NOERROR;
1623 		xs->status = SCSI_OK;
1624 		xs->xs_status &= ~XS_STS_DONE;
1625 		xs->xs_requeuecnt++;
1626 		error = scsipi_enqueue(xs);
1627 		if (error == 0) {
1628 			scsipi_periph_thaw(periph, 1);
1629 			splx(s);
1630 			return (ERESTART);
1631 		}
1632 	}
1633 
1634 	/*
1635 	 * scsipi_done() freezes the queue if not XS_NOERROR.
1636 	 * Thaw it here.
1637 	 */
1638 	if (xs->error != XS_NOERROR)
1639 		scsipi_periph_thaw(periph, 1);
1640 
1641 	/*
1642 	 * Set buffer fields in case the periph
1643 	 * switch done func uses them
1644 	 */
1645 	if ((bp = xs->bp) != NULL) {
1646 		if (error) {
1647 			bp->b_error = error;
1648 			bp->b_flags |= B_ERROR;
1649 			bp->b_resid = bp->b_bcount;
1650 		} else {
1651 			bp->b_error = 0;
1652 			bp->b_resid = xs->resid;
1653 		}
1654 	}
1655 
1656 	if (periph->periph_switch->psw_done)
1657 		periph->periph_switch->psw_done(xs);
1658 
1659 	if (bp)
1660 		biodone(bp);
1661 
1662 	if (xs->xs_control & XS_CTL_ASYNC)
1663 		scsipi_put_xs(xs);
1664 	splx(s);
1665 
1666 	return (error);
1667 }
1668 
1669 /*
1670  * Issue a request sense for the given scsipi_xfer. Called when the xfer
1671  * returns with a CHECK_CONDITION status. Must be called in valid thread
1672  * context and at splbio().
1673  */
1674 
1675 static void
1676 scsipi_request_sense(struct scsipi_xfer *xs)
1677 {
1678 	struct scsipi_periph *periph = xs->xs_periph;
1679 	int flags, error;
1680 	struct scsipi_sense cmd;
1681 
1682 	periph->periph_flags |= PERIPH_SENSE;
1683 
1684 	/* if command was polling, request sense will too */
1685 	flags = xs->xs_control & XS_CTL_POLL;
1686 	/* Polling commands can't sleep */
1687 	if (flags)
1688 		flags |= XS_CTL_NOSLEEP;
1689 
1690 	flags |= XS_CTL_REQSENSE | XS_CTL_URGENT | XS_CTL_DATA_IN |
1691 	    XS_CTL_THAW_PERIPH | XS_CTL_FREEZE_PERIPH;
1692 
1693 	memset(&cmd, 0, sizeof(cmd));
1694 	cmd.opcode = REQUEST_SENSE;
1695 	cmd.length = sizeof(struct scsipi_sense_data);
1696 
1697 	error = scsipi_command(periph, NULL,
1698 	    (struct scsipi_generic *) &cmd, sizeof(cmd),
1699 	    (u_char*)&xs->sense.scsi_sense, sizeof(struct scsipi_sense_data),
1700 	    0, 1000, NULL, flags);
1701 	periph->periph_flags &= ~PERIPH_SENSE;
1702 	periph->periph_xscheck = NULL;
1703 	switch(error) {
1704 	case 0:
1705 		/* we have a valid sense */
1706 		xs->error = XS_SENSE;
1707 		return;
1708 	case EINTR:
1709 		/* REQUEST_SENSE interrupted by bus reset. */
1710 		xs->error = XS_RESET;
1711 		return;
1712 	case EIO:
1713 		/* request sense couldn't be performed */
1714 		/*
1715 		 * XXX this isn't quite right but we don't have anything
1716 		 * better for now
1717 		 */
1718 		xs->error = XS_DRIVER_STUFFUP;
1719 		return;
1720 	default:
1721 		/* Notify that request sense failed. */
1722 		xs->error = XS_DRIVER_STUFFUP;
1723 		scsipi_printaddr(periph);
1724 		printf("request sense failed with error %d\n", error);
1725 		return;
1726 	}
1727 }
1728 
1729 /*
1730  * scsipi_enqueue:
1731  *
1732  *	Enqueue an xfer on a channel.
1733  */
1734 static int
1735 scsipi_enqueue(struct scsipi_xfer *xs)
1736 {
1737 	struct scsipi_channel *chan = xs->xs_periph->periph_channel;
1738 	struct scsipi_xfer *qxs;
1739 	int s;
1740 
1741 	s = splbio();
1742 
1743 	/*
1744 	 * If the xfer is to be polled, and there are already jobs on
1745 	 * the queue, we can't proceed.
1746 	 */
1747 	if ((xs->xs_control & XS_CTL_POLL) != 0 &&
1748 	    TAILQ_FIRST(&chan->chan_queue) != NULL) {
1749 		splx(s);
1750 		xs->error = XS_DRIVER_STUFFUP;
1751 		return (EAGAIN);
1752 	}
1753 
1754 	/*
1755 	 * If we have an URGENT xfer, it's an error recovery command
1756 	 * and it should just go on the head of the channel's queue.
1757 	 */
1758 	if (xs->xs_control & XS_CTL_URGENT) {
1759 		TAILQ_INSERT_HEAD(&chan->chan_queue, xs, channel_q);
1760 		goto out;
1761 	}
1762 
1763 	/*
1764 	 * If this xfer has already been on the queue before, we
1765 	 * need to reinsert it in the correct order.  That order is:
1766 	 *
1767 	 *	Immediately before the first xfer for this periph
1768 	 *	with a requeuecnt less than xs->xs_requeuecnt.
1769 	 *
1770 	 * Failing that, at the end of the queue.  (We'll end up
1771 	 * there naturally.)
1772 	 */
1773 	if (xs->xs_requeuecnt != 0) {
1774 		for (qxs = TAILQ_FIRST(&chan->chan_queue); qxs != NULL;
1775 		     qxs = TAILQ_NEXT(qxs, channel_q)) {
1776 			if (qxs->xs_periph == xs->xs_periph &&
1777 			    qxs->xs_requeuecnt < xs->xs_requeuecnt)
1778 				break;
1779 		}
1780 		if (qxs != NULL) {
1781 			TAILQ_INSERT_AFTER(&chan->chan_queue, qxs, xs,
1782 			    channel_q);
1783 			goto out;
1784 		}
1785 	}
1786 	TAILQ_INSERT_TAIL(&chan->chan_queue, xs, channel_q);
1787  out:
1788 	if (xs->xs_control & XS_CTL_THAW_PERIPH)
1789 		scsipi_periph_thaw(xs->xs_periph, 1);
1790 	splx(s);
1791 	return (0);
1792 }
1793 
1794 /*
1795  * scsipi_run_queue:
1796  *
1797  *	Start as many xfers as possible running on the channel.
1798  */
1799 static void
1800 scsipi_run_queue(struct scsipi_channel *chan)
1801 {
1802 	struct scsipi_xfer *xs;
1803 	struct scsipi_periph *periph;
1804 	int s;
1805 
1806 	for (;;) {
1807 		s = splbio();
1808 
1809 		/*
1810 		 * If the channel is frozen, we can't do any work right
1811 		 * now.
1812 		 */
1813 		if (chan->chan_qfreeze != 0) {
1814 			splx(s);
1815 			return;
1816 		}
1817 
1818 		/*
1819 		 * Look for work to do, and make sure we can do it.
1820 		 */
1821 		for (xs = TAILQ_FIRST(&chan->chan_queue); xs != NULL;
1822 		     xs = TAILQ_NEXT(xs, channel_q)) {
1823 			periph = xs->xs_periph;
1824 
1825 			if ((periph->periph_sent >= periph->periph_openings) ||
1826 			    periph->periph_qfreeze != 0 ||
1827 			    (periph->periph_flags & PERIPH_UNTAG) != 0)
1828 				continue;
1829 
1830 			if ((periph->periph_flags &
1831 			    (PERIPH_RECOVERING | PERIPH_SENSE)) != 0 &&
1832 			    (xs->xs_control & XS_CTL_URGENT) == 0)
1833 				continue;
1834 
1835 			/*
1836 			 * We can issue this xfer!
1837 			 */
1838 			goto got_one;
1839 		}
1840 
1841 		/*
1842 		 * Can't find any work to do right now.
1843 		 */
1844 		splx(s);
1845 		return;
1846 
1847  got_one:
1848 		/*
1849 		 * Have an xfer to run.  Allocate a resource from
1850 		 * the adapter to run it.  If we can't allocate that
1851 		 * resource, we don't dequeue the xfer.
1852 		 */
1853 		if (scsipi_get_resource(chan) == 0) {
1854 			/*
1855 			 * Adapter is out of resources.  If the adapter
1856 			 * supports it, attempt to grow them.
1857 			 */
1858 			if (scsipi_grow_resources(chan) == 0) {
1859 				/*
1860 				 * Wasn't able to grow resources,
1861 				 * nothing more we can do.
1862 				 */
1863 				if (xs->xs_control & XS_CTL_POLL) {
1864 					scsipi_printaddr(xs->xs_periph);
1865 					printf("polling command but no "
1866 					    "adapter resources\n");
1867 					/* We'll panic shortly... */
1868 				}
1869 				splx(s);
1870 
1871 				/*
1872 				 * XXX: We should be able to note that
1873 				 * XXX: resources are needed here!
1874 				 */
1875 				return;
1876 			}
1877 			/*
1878 			 * scsipi_grow_resources() allocated the resource
1879 			 * for us.
1880 			 */
1881 		}
1882 
1883 		/*
1884 		 * We have a resource to run this xfer, do it!
1885 		 */
1886 		TAILQ_REMOVE(&chan->chan_queue, xs, channel_q);
1887 
1888 		/*
1889 		 * If the command is to be tagged, allocate a tag ID
1890 		 * for it.
1891 		 */
1892 		if (XS_CTL_TAGTYPE(xs) != 0)
1893 			scsipi_get_tag(xs);
1894 		else
1895 			periph->periph_flags |= PERIPH_UNTAG;
1896 		periph->periph_sent++;
1897 		splx(s);
1898 
1899 		scsipi_adapter_request(chan, ADAPTER_REQ_RUN_XFER, xs);
1900 	}
1901 #ifdef DIAGNOSTIC
1902 	panic("scsipi_run_queue: impossible");
1903 #endif
1904 }
1905 
1906 /*
1907  * scsipi_execute_xs:
1908  *
1909  *	Begin execution of an xfer, waiting for it to complete, if necessary.
1910  */
1911 int
1912 scsipi_execute_xs(struct scsipi_xfer *xs)
1913 {
1914 	struct scsipi_periph *periph = xs->xs_periph;
1915 	struct scsipi_channel *chan = periph->periph_channel;
1916 	int oasync, async, poll, retries, error, s;
1917 
1918 	xs->xs_status &= ~XS_STS_DONE;
1919 	xs->error = XS_NOERROR;
1920 	xs->resid = xs->datalen;
1921 	xs->status = SCSI_OK;
1922 
1923 #ifdef SCSIPI_DEBUG
1924 	if (xs->xs_periph->periph_dbflags & SCSIPI_DB3) {
1925 		printf("scsipi_execute_xs: ");
1926 		show_scsipi_xs(xs);
1927 		printf("\n");
1928 	}
1929 #endif
1930 
1931 	/*
1932 	 * Deal with command tagging:
1933 	 *
1934 	 *	- If the device's current operating mode doesn't
1935 	 *	  include tagged queueing, clear the tag mask.
1936 	 *
1937 	 *	- If the device's current operating mode *does*
1938 	 *	  include tagged queueing, set the tag_type in
1939 	 *	  the xfer to the appropriate byte for the tag
1940 	 *	  message.
1941 	 */
1942 	if ((PERIPH_XFER_MODE(periph) & PERIPH_CAP_TQING) == 0 ||
1943 		(xs->xs_control & XS_CTL_REQSENSE)) {
1944 		xs->xs_control &= ~XS_CTL_TAGMASK;
1945 		xs->xs_tag_type = 0;
1946 	} else {
1947 		/*
1948 		 * If the request doesn't specify a tag, give Head
1949 		 * tags to URGENT operations and Ordered tags to
1950 		 * everything else.
1951 		 */
1952 		if (XS_CTL_TAGTYPE(xs) == 0) {
1953 			if (xs->xs_control & XS_CTL_URGENT)
1954 				xs->xs_control |= XS_CTL_HEAD_TAG;
1955 			else
1956 				xs->xs_control |= XS_CTL_ORDERED_TAG;
1957 		}
1958 
1959 		switch (XS_CTL_TAGTYPE(xs)) {
1960 		case XS_CTL_ORDERED_TAG:
1961 			xs->xs_tag_type = MSG_ORDERED_Q_TAG;
1962 			break;
1963 
1964 		case XS_CTL_SIMPLE_TAG:
1965 			xs->xs_tag_type = MSG_SIMPLE_Q_TAG;
1966 			break;
1967 
1968 		case XS_CTL_HEAD_TAG:
1969 			xs->xs_tag_type = MSG_HEAD_OF_Q_TAG;
1970 			break;
1971 
1972 		default:
1973 			scsipi_printaddr(periph);
1974 			printf("invalid tag mask 0x%08x\n",
1975 			    XS_CTL_TAGTYPE(xs));
1976 			panic("scsipi_execute_xs");
1977 		}
1978 	}
1979 
1980 	/* If the adapter wants us to poll, poll. */
1981 	if (chan->chan_adapter->adapt_flags & SCSIPI_ADAPT_POLL_ONLY)
1982 		xs->xs_control |= XS_CTL_POLL;
1983 
1984 	/*
1985 	 * If we don't yet have a completion thread, or we are to poll for
1986 	 * completion, clear the ASYNC flag.
1987 	 */
1988 	oasync = (xs->xs_control & XS_CTL_ASYNC);
1989 	if (chan->chan_thread == NULL || (xs->xs_control & XS_CTL_POLL) != 0)
1990 		xs->xs_control &= ~XS_CTL_ASYNC;
1991 
1992 	async = (xs->xs_control & XS_CTL_ASYNC);
1993 	poll = (xs->xs_control & XS_CTL_POLL);
1994 	retries = xs->xs_retries;		/* for polling commands */
1995 
1996 #ifdef DIAGNOSTIC
1997 	if (oasync != 0 && xs->bp == NULL)
1998 		panic("scsipi_execute_xs: XS_CTL_ASYNC but no buf");
1999 #endif
2000 
2001 	/*
2002 	 * Enqueue the transfer.  If we're not polling for completion, this
2003 	 * should ALWAYS return `no error'.
2004 	 */
2005  try_again:
2006 	error = scsipi_enqueue(xs);
2007 	if (error) {
2008 		if (poll == 0) {
2009 			scsipi_printaddr(periph);
2010 			printf("not polling, but enqueue failed with %d\n",
2011 			    error);
2012 			panic("scsipi_execute_xs");
2013 		}
2014 
2015 		scsipi_printaddr(periph);
2016 		printf("failed to enqueue polling command");
2017 		if (retries != 0) {
2018 			printf(", retrying...\n");
2019 			delay(1000000);
2020 			retries--;
2021 			goto try_again;
2022 		}
2023 		printf("\n");
2024 		goto free_xs;
2025 	}
2026 
2027  restarted:
2028 	scsipi_run_queue(chan);
2029 
2030 	/*
2031 	 * The xfer is enqueued, and possibly running.  If it's to be
2032 	 * completed asynchronously, just return now.
2033 	 */
2034 	if (async)
2035 		return (EJUSTRETURN);
2036 
2037 	/*
2038 	 * Not an asynchronous command; wait for it to complete.
2039 	 */
2040 	s = splbio();
2041 	while ((xs->xs_status & XS_STS_DONE) == 0) {
2042 		if (poll) {
2043 			scsipi_printaddr(periph);
2044 			printf("polling command not done\n");
2045 			panic("scsipi_execute_xs");
2046 		}
2047 		(void) tsleep(xs, PRIBIO, "xscmd", 0);
2048 	}
2049 	splx(s);
2050 
2051 	/*
2052 	 * Command is complete.  scsipi_done() has awakened us to perform
2053 	 * the error handling.
2054 	 */
2055 	error = scsipi_complete(xs);
2056 	if (error == ERESTART)
2057 		goto restarted;
2058 
2059 	/*
2060 	 * If it was meant to run async and we cleared async ourselves,
2061 	 * don't return an error here; it has already been handled.
2062 	 */
2063 	if (oasync)
2064 		error = EJUSTRETURN;
2065 	/*
2066 	 * Command completed successfully or fatal error occurred.  Fall
2067 	 * through to release the xfer.
2068 	 */
2069  free_xs:
2070 	s = splbio();
2071 	scsipi_put_xs(xs);
2072 	splx(s);
2073 
2074 	/*
2075 	 * Kick the queue, keep it running in case it stopped for some
2076 	 * reason.
2077 	 */
2078 	scsipi_run_queue(chan);
2079 
2080 	return (error);
2081 }
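
/*
 * Illustrative sketch (not part of the original source): periph
 * drivers normally reach scsipi_execute_xs() through wrappers such
 * as scsipi_test_unit_ready().  For a synchronous caller a return
 * of 0 means the command completed; an async xfer returns
 * EJUSTRETURN here, and completion is reported via scsipi_done().
 * For example, a hypothetical caller polling a device:
 *
 *	error = scsipi_test_unit_ready(periph,
 *	    XS_CTL_POLL | XS_CTL_IGNORE_MEDIA_CHANGE);
 *	if (error)
 *		(device is not ready)
 */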
2082 
2083 /*
2084  * scsipi_completion_thread:
2085  *
2086  *	This is the completion thread.  We wait for errors on
2087  *	asynchronous xfers, and perform the error handling
2088  *	function, restarting the command, if necessary.
2089  */
2090 static void
2091 scsipi_completion_thread(void *arg)
2092 {
2093 	struct scsipi_channel *chan = arg;
2094 	struct scsipi_xfer *xs;
2095 	int s;
2096 
2097 	if (chan->chan_init_cb)
2098 		(*chan->chan_init_cb)(chan, chan->chan_init_cb_arg);
2099 
2100 	s = splbio();
2101 	chan->chan_flags |= SCSIPI_CHAN_TACTIVE;
2102 	splx(s);
2103 	for (;;) {
2104 		s = splbio();
2105 		xs = TAILQ_FIRST(&chan->chan_complete);
2106 		if (xs == NULL && chan->chan_tflags == 0) {
2107 			/* nothing to do; wait */
2108 			(void) tsleep(&chan->chan_complete, PRIBIO,
2109 			    "sccomp", 0);
2110 			splx(s);
2111 			continue;
2112 		}
2113 		if (chan->chan_tflags & SCSIPI_CHANT_CALLBACK) {
2114 			/* call chan_callback from thread context */
2115 			chan->chan_tflags &= ~SCSIPI_CHANT_CALLBACK;
2116 			chan->chan_callback(chan, chan->chan_callback_arg);
2117 			splx(s);
2118 			continue;
2119 		}
2120 		if (chan->chan_tflags & SCSIPI_CHANT_GROWRES) {
2121 			/* attempt to get more openings for this channel */
2122 			chan->chan_tflags &= ~SCSIPI_CHANT_GROWRES;
2123 			scsipi_adapter_request(chan,
2124 			    ADAPTER_REQ_GROW_RESOURCES, NULL);
2125 			scsipi_channel_thaw(chan, 1);
2126 			splx(s);
2127 			continue;
2128 		}
2129 		if (chan->chan_tflags & SCSIPI_CHANT_KICK) {
2130 			/* explicitly run the queues for this channel */
2131 			chan->chan_tflags &= ~SCSIPI_CHANT_KICK;
2132 			scsipi_run_queue(chan);
2133 			splx(s);
2134 			continue;
2135 		}
2136 		if (chan->chan_tflags & SCSIPI_CHANT_SHUTDOWN) {
2137 			splx(s);
2138 			break;
2139 		}
2140 		if (xs) {
2141 			TAILQ_REMOVE(&chan->chan_complete, xs, channel_q);
2142 			splx(s);
2143 
2144 			/*
2145 			 * Have an xfer with an error; process it.
2146 			 */
2147 			(void) scsipi_complete(xs);
2148 
2149 			/*
2150 			 * Kick the queue; keep it running if it was stopped
2151 			 * for some reason.
2152 			 */
2153 			scsipi_run_queue(chan);
2154 		} else {
2155 			splx(s);
2156 		}
2157 	}
2158 
2159 	chan->chan_thread = NULL;
2160 
2161 	/* In case parent is waiting for us to exit. */
2162 	wakeup(&chan->chan_thread);
2163 
2164 	kthread_exit(0);
2165 }
2166 
2167 /*
2168  * scsipi_create_completion_thread:
2169  *
2170  *	Callback to actually create the completion thread.
2171  */
2172 void
2173 scsipi_create_completion_thread(void *arg)
2174 {
2175 	struct scsipi_channel *chan = arg;
2176 	struct scsipi_adapter *adapt = chan->chan_adapter;
2177 
2178 	if (kthread_create1(scsipi_completion_thread, chan,
2179 	    &chan->chan_thread, "%s", chan->chan_name)) {
2180 		printf("%s: unable to create completion thread for "
2181 		    "channel %d\n", adapt->adapt_dev->dv_xname,
2182 		    chan->chan_channel);
2183 		panic("scsipi_create_completion_thread");
2184 	}
2185 }
2186 
2187 /*
2188  * scsipi_thread_call_callback:
2189  *
2190  *	Request that a callback be run from the completion thread.
2191  */
2192 int
2193 scsipi_thread_call_callback(struct scsipi_channel *chan,
2194     void (*callback)(struct scsipi_channel *, void *), void *arg)
2195 {
2196 	int s;
2197 
2198 	s = splbio();
2199 	if ((chan->chan_flags & SCSIPI_CHAN_TACTIVE) == 0) {
2200 		/* kernel thread doesn't exist yet */
2201 		splx(s);
2202 		return ESRCH;
2203 	}
2204 	if (chan->chan_tflags & SCSIPI_CHANT_CALLBACK) {
2205 		splx(s);
2206 		return EBUSY;
2207 	}
2208 	scsipi_channel_freeze(chan, 1);
2209 	chan->chan_callback = callback;
2210 	chan->chan_callback_arg = arg;
2211 	chan->chan_tflags |= SCSIPI_CHANT_CALLBACK;
2212 	wakeup(&chan->chan_complete);
2213 	splx(s);
2214 	return (0);
2215 }
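
/*
 * Illustrative sketch (not part of the original source): an adapter
 * that needs work done in thread context can defer it here; the
 * callback later runs in the channel's completion thread.
 * "myhba_reconfig" and "sc" are hypothetical.
 *
 *	if (scsipi_thread_call_callback(chan, myhba_reconfig, sc) != 0)
 *		(thread not running yet, or a callback already pending)
 */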
2216 
2217 /*
2218  * scsipi_async_event:
2219  *
2220  *	Handle an asynchronous event from an adapter.
2221  */
2222 void
2223 scsipi_async_event(struct scsipi_channel *chan, scsipi_async_event_t event,
2224     void *arg)
2225 {
2226 	int s;
2227 
2228 	s = splbio();
2229 	switch (event) {
2230 	case ASYNC_EVENT_MAX_OPENINGS:
2231 		scsipi_async_event_max_openings(chan,
2232 		    (struct scsipi_max_openings *)arg);
2233 		break;
2234 
2235 	case ASYNC_EVENT_XFER_MODE:
2236 		scsipi_async_event_xfer_mode(chan,
2237 		    (struct scsipi_xfer_mode *)arg);
2238 		break;
2239 	case ASYNC_EVENT_RESET:
2240 		scsipi_async_event_channel_reset(chan);
2241 		break;
2242 	}
2243 	splx(s);
2244 }
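
/*
 * Illustrative sketch (not part of the original source): an adapter
 * reporting a negotiated transfer mode would post an event like the
 * following (target and mode values are hypothetical; xm_period is
 * a sync factor, cf. scsipi_syncparams below):
 *
 *	struct scsipi_xfer_mode xm;
 *
 *	xm.xm_target = 3;
 *	xm.xm_mode = PERIPH_CAP_SYNC | PERIPH_CAP_WIDE16 | PERIPH_CAP_TQING;
 *	xm.xm_period = 0x0a;
 *	xm.xm_offset = 31;
 *	scsipi_async_event(chan, ASYNC_EVENT_XFER_MODE, &xm);
 */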
2245 
2246 /*
2247  * scsipi_print_xfer_mode:
2248  *
2249  *	Print a periph's capabilities.
2250  */
2251 void
2252 scsipi_print_xfer_mode(struct scsipi_periph *periph)
2253 {
2254 	int period, freq, speed, mbs;
2255 
2256 	if ((periph->periph_flags & PERIPH_MODE_VALID) == 0)
2257 		return;
2258 
2259 	aprint_normal("%s: ", periph->periph_dev->dv_xname);
2260 	if (periph->periph_mode & (PERIPH_CAP_SYNC | PERIPH_CAP_DT)) {
2261 		period = scsipi_sync_factor_to_period(periph->periph_period);
2262 		aprint_normal("sync (%d.%02dns offset %d)",
2263 		    period / 100, period % 100, periph->periph_offset);
2264 	} else
2265 		aprint_normal("async");
2266 
2267 	if (periph->periph_mode & PERIPH_CAP_WIDE32)
2268 		aprint_normal(", 32-bit");
2269 	else if (periph->periph_mode & (PERIPH_CAP_WIDE16 | PERIPH_CAP_DT))
2270 		aprint_normal(", 16-bit");
2271 	else
2272 		aprint_normal(", 8-bit");
2273 
2274 	if (periph->periph_mode & (PERIPH_CAP_SYNC | PERIPH_CAP_DT)) {
2275 		freq = scsipi_sync_factor_to_freq(periph->periph_period);
2276 		speed = freq;
2277 		if (periph->periph_mode & PERIPH_CAP_WIDE32)
2278 			speed *= 4;
2279 		else if (periph->periph_mode &
2280 		    (PERIPH_CAP_WIDE16 | PERIPH_CAP_DT))
2281 			speed *= 2;
2282 		mbs = speed / 1000;
2283 		if (mbs > 0)
2284 			aprint_normal(" (%d.%03dMB/s)", mbs, speed % 1000);
2285 		else
2286 			aprint_normal(" (%dKB/s)", speed % 1000);
2287 	}
2288 
2289 	aprint_normal(" transfers");
2290 
2291 	if (periph->periph_mode & PERIPH_CAP_TQING)
2292 		aprint_normal(", tagged queueing");
2293 
2294 	aprint_normal("\n");
2295 }
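
/*
 * Illustrative example (not part of the original source): for a
 * hypothetical disk negotiated at sync factor 0x09 (12.50ns),
 * offset 63, wide-16, with tagged queueing, the function above
 * prints:
 *
 *	sd0: sync (12.50ns offset 63), 16-bit (160.000MB/s) transfers, tagged queueing
 */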
2296 
2297 /*
2298  * scsipi_async_event_max_openings:
2299  *
2300  *	Update the maximum number of outstanding commands a
2301  *	device may have.
2302  */
2303 static void
2304 scsipi_async_event_max_openings(struct scsipi_channel *chan,
2305     struct scsipi_max_openings *mo)
2306 {
2307 	struct scsipi_periph *periph;
2308 	int minlun, maxlun;
2309 
2310 	if (mo->mo_lun == -1) {
2311 		/*
2312 		 * Wildcarded; apply it to all LUNs.
2313 		 */
2314 		minlun = 0;
2315 		maxlun = chan->chan_nluns - 1;
2316 	} else
2317 		minlun = maxlun = mo->mo_lun;
2318 
2319 	/* XXX This could really suck with a large LUN space. */
2320 	for (; minlun <= maxlun; minlun++) {
2321 		periph = scsipi_lookup_periph(chan, mo->mo_target, minlun);
2322 		if (periph == NULL)
2323 			continue;
2324 
2325 		if (mo->mo_openings < periph->periph_openings)
2326 			periph->periph_openings = mo->mo_openings;
2327 		else if (mo->mo_openings > periph->periph_openings &&
2328 		    (periph->periph_flags & PERIPH_GROW_OPENINGS) != 0)
2329 			periph->periph_openings = mo->mo_openings;
2330 	}
2331 }
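
/*
 * Illustrative sketch (not part of the original source): an adapter
 * seeing QUEUE FULL from a device might clamp its tag depth on all
 * LUNs of that target (values hypothetical):
 *
 *	struct scsipi_max_openings mo;
 *
 *	mo.mo_target = 2;
 *	mo.mo_lun = -1;
 *	mo.mo_openings = 16;
 *	scsipi_async_event(chan, ASYNC_EVENT_MAX_OPENINGS, &mo);
 */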
2332 
2333 /*
2334  * scsipi_async_event_xfer_mode:
2335  *
2336  *	Update the xfer mode for all periphs sharing the
2337  *	specified I_T Nexus.
2338  */
2339 static void
2340 scsipi_async_event_xfer_mode(struct scsipi_channel *chan,
2341     struct scsipi_xfer_mode *xm)
2342 {
2343 	struct scsipi_periph *periph;
2344 	int lun, announce, mode, period, offset;
2345 
2346 	for (lun = 0; lun < chan->chan_nluns; lun++) {
2347 		periph = scsipi_lookup_periph(chan, xm->xm_target, lun);
2348 		if (periph == NULL)
2349 			continue;
2350 		announce = 0;
2351 
2352 		/*
2353 		 * Clamp the xfer mode down to this periph's capabilities.
2354 		 */
2355 		mode = xm->xm_mode & periph->periph_cap;
2356 		if (mode & PERIPH_CAP_SYNC) {
2357 			period = xm->xm_period;
2358 			offset = xm->xm_offset;
2359 		} else {
2360 			period = 0;
2361 			offset = 0;
2362 		}
2363 
2364 		/*
2365 		 * If we do not have a valid xfer mode yet, or the parameters
2366 		 * are different, announce them.
2367 		 */
2368 		if ((periph->periph_flags & PERIPH_MODE_VALID) == 0 ||
2369 		    periph->periph_mode != mode ||
2370 		    periph->periph_period != period ||
2371 		    periph->periph_offset != offset)
2372 			announce = 1;
2373 
2374 		periph->periph_mode = mode;
2375 		periph->periph_period = period;
2376 		periph->periph_offset = offset;
2377 		periph->periph_flags |= PERIPH_MODE_VALID;
2378 
2379 		if (announce)
2380 			scsipi_print_xfer_mode(periph);
2381 	}
2382 }
2383 
2384 /*
2385  * scsipi_set_xfer_mode:
2386  *
2387  *	Set the xfer mode for the specified I_T Nexus.
2388  */
2389 void
2390 scsipi_set_xfer_mode(struct scsipi_channel *chan, int target, int immed)
2391 {
2392 	struct scsipi_xfer_mode xm;
2393 	struct scsipi_periph *itperiph;
2394 	int lun, s;
2395 
2396 	/*
2397 	 * Go to the minimal xfer mode.
2398 	 */
2399 	xm.xm_target = target;
2400 	xm.xm_mode = 0;
2401 	xm.xm_period = 0;			/* ignored */
2402 	xm.xm_offset = 0;			/* ignored */
2403 
2404 	/*
2405 	 * Find the first LUN we know about on this I_T Nexus.
2406 	 */
2407 	for (itperiph = NULL, lun = 0; lun < chan->chan_nluns; lun++) {
2408 		itperiph = scsipi_lookup_periph(chan, target, lun);
2409 		if (itperiph != NULL)
2410 			break;
2411 	}
2412 	if (itperiph != NULL) {
2413 		xm.xm_mode = itperiph->periph_cap;
2414 		/*
2415 		 * Now issue the request to the adapter.
2416 		 */
2417 		s = splbio();
2418 		scsipi_adapter_request(chan, ADAPTER_REQ_SET_XFER_MODE, &xm);
2419 		splx(s);
2420 		/*
2421 		 * If we want this to happen immediately, issue a dummy
2422 		 * command, since most adapters can't really negotiate unless
2423 		 * they're executing a job.
2424 		 */
2425 		if (immed != 0) {
2426 			(void) scsipi_test_unit_ready(itperiph,
2427 			    XS_CTL_DISCOVERY | XS_CTL_IGNORE_ILLEGAL_REQUEST |
2428 			    XS_CTL_IGNORE_NOT_READY |
2429 			    XS_CTL_IGNORE_MEDIA_CHANGE);
2430 		}
2431 	}
2432 }
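
/*
 * Illustrative sketch (not part of the original source): renegotiate
 * with target 1 immediately, e.g. after its quirks change; the dummy
 * TEST UNIT READY issued above pushes the negotiation through:
 *
 *	scsipi_set_xfer_mode(chan, 1, 1);
 */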
2433 
2434 /*
2435  * scsipi_async_event_channel_reset:
2436  *
2437  *	Handle a SCSI bus reset.
2438  *	Called at splbio.
2439  */
2440 static void
2441 scsipi_async_event_channel_reset(struct scsipi_channel *chan)
2442 {
2443 	struct scsipi_xfer *xs, *xs_next;
2444 	struct scsipi_periph *periph;
2445 	int target, lun;
2446 
2447 	/*
2448 	 * The channel has been reset.  Also mark pending REQUEST_SENSE
2449 	 * commands as reset, since their sense data is no longer valid.
2450 	 * We can't call scsipi_done() from here, as the command has not
2451 	 * been sent to the adapter yet (that would corrupt accounting).
2452 	 */
2453 
2454 	for (xs = TAILQ_FIRST(&chan->chan_queue); xs != NULL; xs = xs_next) {
2455 		xs_next = TAILQ_NEXT(xs, channel_q);
2456 		if (xs->xs_control & XS_CTL_REQSENSE) {
2457 			TAILQ_REMOVE(&chan->chan_queue, xs, channel_q);
2458 			xs->error = XS_RESET;
2459 			if ((xs->xs_control & XS_CTL_ASYNC) != 0)
2460 				TAILQ_INSERT_TAIL(&chan->chan_complete, xs,
2461 				    channel_q);
2462 		}
2463 	}
2464 	wakeup(&chan->chan_complete);
2465 	/* Catch xfers with pending sense that may not have a REQSENSE xs yet */
2466 	for (target = 0; target < chan->chan_ntargets; target++) {
2467 		if (target == chan->chan_id)
2468 			continue;
2469 		for (lun = 0; lun < chan->chan_nluns; lun++) {
2470 			periph = scsipi_lookup_periph(chan, target, lun);
2471 			if (periph) {
2472 				xs = periph->periph_xscheck;
2473 				if (xs)
2474 					xs->error = XS_RESET;
2475 			}
2476 		}
2477 	}
2478 }
2479 
2480 /*
2481  * scsipi_target_detach:
2482  *
2483  *	detach all periph associated with a I_T
2484  * 	must be called from valid thread context
2485  */
2486 int
2487 scsipi_target_detach(struct scsipi_channel *chan, int target, int lun,
2488     int flags)
2489 {
2490 	struct scsipi_periph *periph;
2491 	int ctarget, mintarget, maxtarget;
2492 	int clun, minlun, maxlun;
2493 	int error;
2494 
2495 	if (target == -1) {
2496 		mintarget = 0;
2497 		maxtarget = chan->chan_ntargets;
2498 	} else {
2499 		if (target == chan->chan_id)
2500 			return EINVAL;
2501 		if (target < 0 || target >= chan->chan_ntargets)
2502 			return EINVAL;
2503 		mintarget = target;
2504 		maxtarget = target + 1;
2505 	}
2506 
2507 	if (lun == -1) {
2508 		minlun = 0;
2509 		maxlun = chan->chan_nluns;
2510 	} else {
2511 		if (lun < 0 || lun >= chan->chan_nluns)
2512 			return EINVAL;
2513 		minlun = lun;
2514 		maxlun = lun + 1;
2515 	}
2516 
2517 	for (ctarget = mintarget; ctarget < maxtarget; ctarget++) {
2518 		if (ctarget == chan->chan_id)
2519 			continue;
2520 
2521 		for (clun = minlun; clun < maxlun; clun++) {
2522 			periph = scsipi_lookup_periph(chan, ctarget, clun);
2523 			if (periph == NULL)
2524 				continue;
2525 			error = config_detach(periph->periph_dev, flags);
2526 			if (error)
2527 				return (error);
2528 		}
2529 	}
2530 	return (0);
2531 }
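
/*
 * Illustrative sketch (not part of the original source): detach every
 * periph on target 4, all LUNs, forcibly, e.g. when the controller
 * itself is going away:
 *
 *	error = scsipi_target_detach(chan, 4, -1, DETACH_FORCE);
 */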
2532 
2533 /*
2534  * scsipi_adapter_addref:
2535  *
2536  *	Add a reference to the specified adapter, enabling the
2537  *	adapter if necessary.
2538  */
2539 int
2540 scsipi_adapter_addref(struct scsipi_adapter *adapt)
2541 {
2542 	int s, error = 0;
2543 
2544 	s = splbio();
2545 	if (adapt->adapt_refcnt++ == 0 && adapt->adapt_enable != NULL) {
2546 		error = (*adapt->adapt_enable)(adapt->adapt_dev, 1);
2547 		if (error)
2548 			adapt->adapt_refcnt--;
2549 	}
2550 	splx(s);
2551 	return (error);
2552 }
2553 
2554 /*
2555  * scsipi_adapter_delref:
2556  *
2557  *	Delete a reference to the specified adapter, disabling the
2558  *	adapter if possible.
2559  */
2560 void
2561 scsipi_adapter_delref(struct scsipi_adapter *adapt)
2562 {
2563 	int s;
2564 
2565 	s = splbio();
2566 	if (adapt->adapt_refcnt-- == 1 && adapt->adapt_enable != NULL)
2567 		(void) (*adapt->adapt_enable)(adapt->adapt_dev, 0);
2568 	splx(s);
2569 }
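
/*
 * Illustrative sketch (not part of the original source): a periph
 * driver brackets device activity with these calls, so that an
 * adapter with an adapt_enable hook is only enabled while in use:
 *
 *	if ((error = scsipi_adapter_addref(adapt)) != 0)
 *		return (error);
 *	(use the device)
 *	scsipi_adapter_delref(adapt);
 */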
2570 
2571 static struct scsipi_syncparam {
2572 	int	ss_factor;
2573 	int	ss_period;	/* ns * 100 */
2574 } scsipi_syncparams[] = {
2575 	{ 0x08,		 625 },	/* FAST-160 (Ultra320) */
2576 	{ 0x09,		1250 },	/* FAST-80 (Ultra160) */
2577 	{ 0x0a,		2500 },	/* FAST-40 40MHz (Ultra2) */
2578 	{ 0x0b,		3030 },	/* FAST-40 33MHz (Ultra2) */
2579 	{ 0x0c,		5000 },	/* FAST-20 (Ultra) */
2580 };
2581 static const int scsipi_nsyncparams =
2582     sizeof(scsipi_syncparams) / sizeof(scsipi_syncparams[0]);
2583 
2584 int
2585 scsipi_sync_period_to_factor(int period /* ns * 100 */)
2586 {
2587 	int i;
2588 
2589 	for (i = 0; i < scsipi_nsyncparams; i++) {
2590 		if (period <= scsipi_syncparams[i].ss_period)
2591 			return (scsipi_syncparams[i].ss_factor);
2592 	}
2593 
2594 	return ((period / 100) / 4);
2595 }
2596 
2597 int
2598 scsipi_sync_factor_to_period(int factor)
2599 {
2600 	int i;
2601 
2602 	for (i = 0; i < scsipi_nsyncparams; i++) {
2603 		if (factor == scsipi_syncparams[i].ss_factor)
2604 			return (scsipi_syncparams[i].ss_period);
2605 	}
2606 
2607 	return ((factor * 4) * 100);
2608 }
2609 
2610 int
2611 scsipi_sync_factor_to_freq(int factor)
2612 {
2613 	int i;
2614 
2615 	for (i = 0; i < scsipi_nsyncparams; i++) {
2616 		if (factor == scsipi_syncparams[i].ss_factor)
2617 			return (100000000 / scsipi_syncparams[i].ss_period);
2618 	}
2619 
2620 	return (10000000 / ((factor * 4) * 10));
2621 }
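
/*
 * Worked example (illustrative): factor 0x0a (FAST-40) is in the
 * table, so scsipi_sync_factor_to_period() returns 2500 (25.00ns)
 * and scsipi_sync_factor_to_freq() returns 100000000 / 2500 = 40000,
 * i.e. 40MHz in kHz units.  A factor outside the table, e.g. 0x19,
 * uses the legacy formulas: period = 0x19 * 4 * 100 = 10000 (100ns)
 * and freq = 10000000 / (0x19 * 4 * 10) = 10000 (10MHz).
 */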
2622 
2623 #ifdef SCSIPI_DEBUG
2624 /*
2625  * Given a scsipi_xfer, dump the request, in all its glory.
2626  */
2627 void
2628 show_scsipi_xs(struct scsipi_xfer *xs)
2629 {
2630 
2631 	printf("xs(%p): ", xs);
2632 	printf("xs_control(0x%08x)", xs->xs_control);
2633 	printf("xs_status(0x%08x)", xs->xs_status);
2634 	printf("periph(%p)", xs->xs_periph);
2635 	printf("retr(0x%x)", xs->xs_retries);
2636 	printf("timo(0x%x)", xs->timeout);
2637 	printf("cmd(%p)", xs->cmd);
2638 	printf("len(0x%x)", xs->cmdlen);
2639 	printf("data(%p)", xs->data);
2640 	printf("len(0x%x)", xs->datalen);
2641 	printf("res(0x%x)", xs->resid);
2642 	printf("err(0x%x)", xs->error);
2643 	printf("bp(%p)", xs->bp);
2644 	show_scsipi_cmd(xs);
2645 }
2646 
2647 void
2648 show_scsipi_cmd(struct scsipi_xfer *xs)
2649 {
2650 	u_char *b = (u_char *) xs->cmd;
2651 	int i = 0;
2652 
2653 	scsipi_printaddr(xs->xs_periph);
2654 	printf(" command: ");
2655 
2656 	if ((xs->xs_control & XS_CTL_RESET) == 0) {
2657 		while (i < xs->cmdlen) {
2658 			if (i)
2659 				printf(",");
2660 			printf("0x%x", b[i++]);
2661 		}
2662 		printf("-[%d bytes]\n", xs->datalen);
2663 		if (xs->datalen)
2664 			show_mem(xs->data, min(64, xs->datalen));
2665 	} else
2666 		printf("-RESET-\n");
2667 }
2668 
2669 void
2670 show_mem(u_char *address, int num)
2671 {
2672 	int x;
2673 
2674 	printf("------------------------------");
2675 	for (x = 0; x < num; x++) {
2676 		if ((x % 16) == 0)
2677 			printf("\n%03d: ", x);
2678 		printf("%02x ", *address++);
2679 	}
2680 	printf("\n------------------------------\n");
2681 }
2682 #endif /* SCSIPI_DEBUG */
2683