xref: /netbsd-src/sys/dev/scsipi/scsipi_base.c (revision b7ae68fde0d8ef1c03714e8bbb1ee7c6118ea93b)
1 /*	$NetBSD: scsipi_base.c,v 1.137 2006/09/11 19:43:55 reinoud Exp $	*/
2 
3 /*-
4  * Copyright (c) 1998, 1999, 2000, 2002, 2003, 2004 The NetBSD Foundation, Inc.
5  * All rights reserved.
6  *
7  * This code is derived from software contributed to The NetBSD Foundation
8  * by Charles M. Hannum; by Jason R. Thorpe of the Numerical Aerospace
9  * Simulation Facility, NASA Ames Research Center.
10  *
11  * Redistribution and use in source and binary forms, with or without
12  * modification, are permitted provided that the following conditions
13  * are met:
14  * 1. Redistributions of source code must retain the above copyright
15  *    notice, this list of conditions and the following disclaimer.
16  * 2. Redistributions in binary form must reproduce the above copyright
17  *    notice, this list of conditions and the following disclaimer in the
18  *    documentation and/or other materials provided with the distribution.
19  * 3. All advertising materials mentioning features or use of this software
20  *    must display the following acknowledgement:
21  *        This product includes software developed by the NetBSD
22  *        Foundation, Inc. and its contributors.
23  * 4. Neither the name of The NetBSD Foundation nor the names of its
24  *    contributors may be used to endorse or promote products derived
25  *    from this software without specific prior written permission.
26  *
27  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
28  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
29  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
30  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
31  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
32  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
33  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
34  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
35  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
36  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
37  * POSSIBILITY OF SUCH DAMAGE.
38  */
39 
40 #include <sys/cdefs.h>
41 __KERNEL_RCSID(0, "$NetBSD: scsipi_base.c,v 1.137 2006/09/11 19:43:55 reinoud Exp $");
42 
43 #include "opt_scsi.h"
44 
45 #include <sys/param.h>
46 #include <sys/systm.h>
47 #include <sys/kernel.h>
48 #include <sys/buf.h>
49 #include <sys/uio.h>
50 #include <sys/malloc.h>
51 #include <sys/pool.h>
52 #include <sys/errno.h>
53 #include <sys/device.h>
54 #include <sys/proc.h>
55 #include <sys/kthread.h>
56 #include <sys/hash.h>
57 
58 #include <uvm/uvm_extern.h>
59 
60 #include <dev/scsipi/scsi_spc.h>
61 #include <dev/scsipi/scsipi_all.h>
62 #include <dev/scsipi/scsipi_disk.h>
63 #include <dev/scsipi/scsipiconf.h>
64 #include <dev/scsipi/scsipi_base.h>
65 
66 #include <dev/scsipi/scsi_all.h>
67 #include <dev/scsipi/scsi_message.h>
68 
69 static int	scsipi_complete(struct scsipi_xfer *);
70 static void	scsipi_request_sense(struct scsipi_xfer *);
71 static int	scsipi_enqueue(struct scsipi_xfer *);
72 static void	scsipi_run_queue(struct scsipi_channel *chan);
73 
74 static void	scsipi_completion_thread(void *);
75 
76 static void	scsipi_get_tag(struct scsipi_xfer *);
77 static void	scsipi_put_tag(struct scsipi_xfer *);
78 
79 static int	scsipi_get_resource(struct scsipi_channel *);
80 static void	scsipi_put_resource(struct scsipi_channel *);
81 
82 static void	scsipi_async_event_max_openings(struct scsipi_channel *,
83 		    struct scsipi_max_openings *);
84 static void	scsipi_async_event_xfer_mode(struct scsipi_channel *,
85 		    struct scsipi_xfer_mode *);
86 static void	scsipi_async_event_channel_reset(struct scsipi_channel *);
87 
88 static struct pool scsipi_xfer_pool;
89 
90 /*
91  * scsipi_init:
92  *
93  *	Called when a scsibus or atapibus is attached to the system
94  *	to initialize shared data structures.
95  */
96 void
97 scsipi_init(void)
98 {
99 	static int scsipi_init_done;
100 
101 	if (scsipi_init_done)
102 		return;
103 	scsipi_init_done = 1;
104 
105 	/* Initialize the scsipi_xfer pool. */
106 	pool_init(&scsipi_xfer_pool, sizeof(struct scsipi_xfer), 0,
107 	    0, 0, "scxspl", NULL);
108 	if (pool_prime(&scsipi_xfer_pool,
109 	    PAGE_SIZE / sizeof(struct scsipi_xfer)) == ENOMEM) {
110 		printf("WARNING: not enough memory for scsipi_xfer_pool\n");
111 	}
112 }
113 
114 /*
115  * scsipi_channel_init:
116  *
117  *	Initialize a scsipi_channel when it is attached.
118  */
119 int
120 scsipi_channel_init(struct scsipi_channel *chan)
121 {
122 	int i;
123 
124 	/* Initialize shared data. */
125 	scsipi_init();
126 
127 	/* Initialize the queues. */
128 	TAILQ_INIT(&chan->chan_queue);
129 	TAILQ_INIT(&chan->chan_complete);
130 
131 	for (i = 0; i < SCSIPI_CHAN_PERIPH_BUCKETS; i++)
132 		LIST_INIT(&chan->chan_periphtab[i]);
133 
134 	/*
135 	 * Create the asynchronous completion thread.
136 	 */
137 	kthread_create(scsipi_create_completion_thread, chan);
138 	return (0);
139 }
140 
141 /*
142  * scsipi_channel_shutdown:
143  *
144  *	Shutdown a scsipi_channel.
145  */
146 void
147 scsipi_channel_shutdown(struct scsipi_channel *chan)
148 {
149 
150 	/*
151 	 * Shut down the completion thread.
152 	 */
153 	chan->chan_tflags |= SCSIPI_CHANT_SHUTDOWN;
154 	wakeup(&chan->chan_complete);
155 
156 	/*
157 	 * Now wait for the thread to exit.
158 	 */
159 	while (chan->chan_thread != NULL)
160 		(void) tsleep(&chan->chan_thread, PRIBIO, "scshut", 0);
161 }
162 
163 static uint32_t
164 scsipi_chan_periph_hash(uint64_t t, uint64_t l)
165 {
166 	uint32_t hash;
167 
168 	hash = hash32_buf(&t, sizeof(t), HASH32_BUF_INIT);
169 	hash = hash32_buf(&l, sizeof(l), hash);
170 
171 	return (hash & SCSIPI_CHAN_PERIPH_HASHMASK);
172 }
173 
174 /*
175  * scsipi_insert_periph:
176  *
177  *	Insert a periph into the channel.
178  */
179 void
180 scsipi_insert_periph(struct scsipi_channel *chan, struct scsipi_periph *periph)
181 {
182 	uint32_t hash;
183 	int s;
184 
185 	hash = scsipi_chan_periph_hash(periph->periph_target,
186 	    periph->periph_lun);
187 
188 	s = splbio();
189 	LIST_INSERT_HEAD(&chan->chan_periphtab[hash], periph, periph_hash);
190 	splx(s);
191 }
192 
193 /*
194  * scsipi_remove_periph:
195  *
196  *	Remove a periph from the channel.
197  */
198 void
199 scsipi_remove_periph(struct scsipi_channel *chan, struct scsipi_periph *periph)
200 {
201 	int s;
202 
203 	s = splbio();
204 	LIST_REMOVE(periph, periph_hash);
205 	splx(s);
206 }
207 
208 /*
209  * scsipi_lookup_periph:
210  *
211  *	Lookup a periph on the specified channel.
212  */
213 struct scsipi_periph *
214 scsipi_lookup_periph(struct scsipi_channel *chan, int target, int lun)
215 {
216 	struct scsipi_periph *periph;
217 	uint32_t hash;
218 	int s;
219 
220 	if (target >= chan->chan_ntargets ||
221 	    lun >= chan->chan_nluns)
222 		return (NULL);
223 
224 	hash = scsipi_chan_periph_hash(target, lun);
225 
226 	s = splbio();
227 	LIST_FOREACH(periph, &chan->chan_periphtab[hash], periph_hash) {
228 		if (periph->periph_target == target &&
229 		    periph->periph_lun == lun)
230 			break;
231 	}
232 	splx(s);
233 
234 	return (periph);
235 }
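
/*
 * Usage sketch (not from this file): handlers that only know a
 * (target, lun) address translate it back to its periph this way,
 * and must tolerate a NULL result:
 *
 *	periph = scsipi_lookup_periph(chan, target, lun);
 *	if (periph == NULL)
 *		return;
 */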
236 
237 /*
238  * scsipi_get_resource:
239  *
240  *	Allocate a single xfer `resource' from the channel.
241  *
242  *	NOTE: Must be called at splbio().
243  */
244 static int
245 scsipi_get_resource(struct scsipi_channel *chan)
246 {
247 	struct scsipi_adapter *adapt = chan->chan_adapter;
248 
249 	if (chan->chan_flags & SCSIPI_CHAN_OPENINGS) {
250 		if (chan->chan_openings > 0) {
251 			chan->chan_openings--;
252 			return (1);
253 		}
254 		return (0);
255 	}
256 
257 	if (adapt->adapt_openings > 0) {
258 		adapt->adapt_openings--;
259 		return (1);
260 	}
261 	return (0);
262 }
263 
264 /*
265  * scsipi_grow_resources:
266  *
267  *	Attempt to grow resources for a channel.  If this succeeds,
268  *	we allocate one for our caller.
269  *
270  *	NOTE: Must be called at splbio().
271  */
272 static inline int
273 scsipi_grow_resources(struct scsipi_channel *chan)
274 {
275 
276 	if (chan->chan_flags & SCSIPI_CHAN_CANGROW) {
277 		if ((chan->chan_flags & SCSIPI_CHAN_TACTIVE) == 0) {
278 			scsipi_adapter_request(chan,
279 			    ADAPTER_REQ_GROW_RESOURCES, NULL);
280 			return (scsipi_get_resource(chan));
281 		}
282 		/*
283 		 * Ask the channel thread to do it.  It'll have to thaw the
284 		 * queue.
285 		 */
286 		scsipi_channel_freeze(chan, 1);
287 		chan->chan_tflags |= SCSIPI_CHANT_GROWRES;
288 		wakeup(&chan->chan_complete);
289 		return (0);
290 	}
291 
292 	return (0);
293 }
294 
295 /*
296  * scsipi_put_resource:
297  *
298  *	Free a single xfer `resource' to the channel.
299  *
300  *	NOTE: Must be called at splbio().
301  */
302 static void
303 scsipi_put_resource(struct scsipi_channel *chan)
304 {
305 	struct scsipi_adapter *adapt = chan->chan_adapter;
306 
307 	if (chan->chan_flags & SCSIPI_CHAN_OPENINGS)
308 		chan->chan_openings++;
309 	else
310 		adapt->adapt_openings++;
311 }
312 
313 /*
314  * scsipi_get_tag:
315  *
316  *	Get a tag ID for the specified xfer.
317  *
318  *	NOTE: Must be called at splbio().
319  */
320 static void
321 scsipi_get_tag(struct scsipi_xfer *xs)
322 {
323 	struct scsipi_periph *periph = xs->xs_periph;
324 	int bit, tag;
325 	u_int word;
326 
327 	bit = 0;	/* XXX gcc */
328 	for (word = 0; word < PERIPH_NTAGWORDS; word++) {
329 		bit = ffs(periph->periph_freetags[word]);
330 		if (bit != 0)
331 			break;
332 	}
333 #ifdef DIAGNOSTIC
334 	if (word == PERIPH_NTAGWORDS) {
335 		scsipi_printaddr(periph);
336 		printf("no free tags\n");
337 		panic("scsipi_get_tag");
338 	}
339 #endif
340 
341 	bit -= 1;
342 	periph->periph_freetags[word] &= ~(1 << bit);
343 	tag = (word << 5) | bit;
344 
345 	/* XXX Should eventually disallow this completely. */
346 	if (tag >= periph->periph_openings) {
347 		scsipi_printaddr(periph);
348 		printf("WARNING: tag %d greater than available openings %d\n",
349 		    tag, periph->periph_openings);
350 	}
351 
352 	xs->xs_tag_id = tag;
353 }
354 
355 /*
356  * scsipi_put_tag:
357  *
358  *	Put the tag ID for the specified xfer back into the pool.
359  *
360  *	NOTE: Must be called at splbio().
361  */
362 static void
363 scsipi_put_tag(struct scsipi_xfer *xs)
364 {
365 	struct scsipi_periph *periph = xs->xs_periph;
366 	int word, bit;
367 
368 	word = xs->xs_tag_id >> 5;
369 	bit = xs->xs_tag_id & 0x1f;
370 
371 	periph->periph_freetags[word] |= (1 << bit);
372 }
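
/*
 * Worked example of the encoding used by scsipi_get_tag() and
 * scsipi_put_tag(): a tag ID packs a freetags word index and a bit
 * index as tag = (word << 5) | bit.  Tag 37, for instance, is word 1
 * (37 >> 5) and bit 5 (37 & 0x1f); scsipi_get_tag() clears that bit
 * to mark the tag busy, and scsipi_put_tag() sets it to free it.
 */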
373 
374 /*
375  * scsipi_get_xs:
376  *
377  *	Allocate an xfer descriptor and associate it with the
378  *	specified peripheral.  If the peripheral has no more
379  *	available command openings, we either block waiting for
380  *	one to become available, or fail.
381  */
382 struct scsipi_xfer *
383 scsipi_get_xs(struct scsipi_periph *periph, int flags)
384 {
385 	struct scsipi_xfer *xs;
386 	int s;
387 
388 	SC_DEBUG(periph, SCSIPI_DB3, ("scsipi_get_xs\n"));
389 
390 	KASSERT(!cold);
391 
392 #ifdef DIAGNOSTIC
393 	/*
394 	 * URGENT commands can never be ASYNC.
395 	 */
396 	if ((flags & (XS_CTL_URGENT|XS_CTL_ASYNC)) ==
397 	    (XS_CTL_URGENT|XS_CTL_ASYNC)) {
398 		scsipi_printaddr(periph);
399 		printf("URGENT and ASYNC\n");
400 		panic("scsipi_get_xs");
401 	}
402 #endif
403 
404 	s = splbio();
405 	/*
406 	 * Wait for a command opening to become available.  Rules:
407 	 *
408 	 *	- All xfers must wait for an available opening.
409 	 *	  Exception: URGENT xfers can proceed when
410 	 *	  active == openings, because we use the opening
411 	 *	  of the command we're recovering for.
412 	 *	- If the periph has sense pending, only URGENT & REQSENSE
413 	 *	  xfers may proceed.
414 	 *
415 	 *	- If the periph is recovering, only URGENT xfers may
416 	 *	  proceed.
417 	 *
418 	 *	- If the periph is currently executing a recovery
419 	 *	  command, URGENT commands must block, because only
420 	 *	  one recovery command can execute at a time.
421 	 */
422 	for (;;) {
423 		if (flags & XS_CTL_URGENT) {
424 			if (periph->periph_active > periph->periph_openings)
425 				goto wait_for_opening;
426 			if (periph->periph_flags & PERIPH_SENSE) {
427 				if ((flags & XS_CTL_REQSENSE) == 0)
428 					goto wait_for_opening;
429 			} else {
430 				if ((periph->periph_flags &
431 				    PERIPH_RECOVERY_ACTIVE) != 0)
432 					goto wait_for_opening;
433 				periph->periph_flags |= PERIPH_RECOVERY_ACTIVE;
434 			}
435 			break;
436 		}
437 		if (periph->periph_active >= periph->periph_openings ||
438 		    (periph->periph_flags & PERIPH_RECOVERING) != 0)
439 			goto wait_for_opening;
440 		periph->periph_active++;
441 		break;
442 
443  wait_for_opening:
444 		if (flags & XS_CTL_NOSLEEP) {
445 			splx(s);
446 			return (NULL);
447 		}
448 		SC_DEBUG(periph, SCSIPI_DB3, ("sleeping\n"));
449 		periph->periph_flags |= PERIPH_WAITING;
450 		(void) tsleep(periph, PRIBIO, "getxs", 0);
451 	}
452 	SC_DEBUG(periph, SCSIPI_DB3, ("calling pool_get\n"));
453 	xs = pool_get(&scsipi_xfer_pool,
454 	    ((flags & XS_CTL_NOSLEEP) != 0 ? PR_NOWAIT : PR_WAITOK));
455 	if (xs == NULL) {
456 		if (flags & XS_CTL_URGENT) {
457 			if ((flags & XS_CTL_REQSENSE) == 0)
458 				periph->periph_flags &= ~PERIPH_RECOVERY_ACTIVE;
459 		} else
460 			periph->periph_active--;
461 		scsipi_printaddr(periph);
462 		printf("unable to allocate %sscsipi_xfer\n",
463 		    (flags & XS_CTL_URGENT) ? "URGENT " : "");
464 	}
465 	splx(s);
466 
467 	SC_DEBUG(periph, SCSIPI_DB3, ("returning\n"));
468 
469 	if (xs != NULL) {
470 		memset(xs, 0, sizeof(*xs));
471 		callout_init(&xs->xs_callout);
472 		xs->xs_periph = periph;
473 		xs->xs_control = flags;
474 		xs->xs_status = 0;
475 		s = splbio();
476 		TAILQ_INSERT_TAIL(&periph->periph_xferq, xs, device_q);
477 		splx(s);
478 	}
479 	return (xs);
480 }
481 
482 /*
483  * scsipi_put_xs:
484  *
485  *	Release an xfer descriptor, decreasing the outstanding command
486  *	count for the peripheral.  If there is a thread waiting for
487  *	an opening, wake it up.  If not, kick any queued I/O the
488  *	peripheral may have.
489  *
490  *	NOTE: Must be called at splbio().
491  */
492 void
493 scsipi_put_xs(struct scsipi_xfer *xs)
494 {
495 	struct scsipi_periph *periph = xs->xs_periph;
496 	int flags = xs->xs_control;
497 
498 	SC_DEBUG(periph, SCSIPI_DB3, ("scsipi_free_xs\n"));
499 
500 	TAILQ_REMOVE(&periph->periph_xferq, xs, device_q);
501 	pool_put(&scsipi_xfer_pool, xs);
502 
503 #ifdef DIAGNOSTIC
504 	if ((periph->periph_flags & PERIPH_RECOVERY_ACTIVE) != 0 &&
505 	    periph->periph_active == 0) {
506 		scsipi_printaddr(periph);
507 		printf("recovery without a command to recovery for\n");
508 		panic("scsipi_put_xs");
509 	}
510 #endif
511 
512 	if (flags & XS_CTL_URGENT) {
513 		if ((flags & XS_CTL_REQSENSE) == 0)
514 			periph->periph_flags &= ~PERIPH_RECOVERY_ACTIVE;
515 	} else
516 		periph->periph_active--;
517 	if (periph->periph_active == 0 &&
518 	    (periph->periph_flags & PERIPH_WAITDRAIN) != 0) {
519 		periph->periph_flags &= ~PERIPH_WAITDRAIN;
520 		wakeup(&periph->periph_active);
521 	}
522 
523 	if (periph->periph_flags & PERIPH_WAITING) {
524 		periph->periph_flags &= ~PERIPH_WAITING;
525 		wakeup(periph);
526 	} else {
527 		if (periph->periph_switch->psw_start != NULL &&
528 		    device_is_active(periph->periph_dev)) {
529 			SC_DEBUG(periph, SCSIPI_DB2,
530 			    ("calling private start()\n"));
531 			(*periph->periph_switch->psw_start)(periph);
532 		}
533 	}
534 }
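
/*
 * Allocation sketch (hedged): a periph driver pairs scsipi_get_xs()
 * and scsipi_put_xs() around one command; most callers go through
 * scsipi_command(), which performs this cycle internally.  Roughly:
 *
 *	xs = scsipi_get_xs(periph, XS_CTL_NOSLEEP);
 *	if (xs == NULL)
 *		return;
 *	... fill in xs->cmd, xs->cmdlen, xs->data, xs->datalen,
 *	    xs->timeout, xs->xs_retries ...
 *	error = scsipi_execute_xs(xs);
 *
 * For synchronous commands scsipi_execute_xs() releases the xfer via
 * scsipi_put_xs(); for XS_CTL_ASYNC ones scsipi_complete() does.
 */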
535 
536 /*
537  * scsipi_channel_freeze:
538  *
539  *	Freeze a channel's xfer queue.
540  */
541 void
542 scsipi_channel_freeze(struct scsipi_channel *chan, int count)
543 {
544 	int s;
545 
546 	s = splbio();
547 	chan->chan_qfreeze += count;
548 	splx(s);
549 }
550 
551 /*
552  * scsipi_channel_thaw:
553  *
554  *	Thaw a channel's xfer queue.
555  */
556 void
557 scsipi_channel_thaw(struct scsipi_channel *chan, int count)
558 {
559 	int s;
560 
561 	s = splbio();
562 	chan->chan_qfreeze -= count;
563 	/*
564 	 * Don't let the freeze count go negative.
565 	 *
566 	 * Presumably the adapter driver could keep track of this,
567 	 * but it might just be easier to do this here so as to allow
568 	 * multiple callers, including those outside the adapter driver.
569 	 */
570 	if (chan->chan_qfreeze < 0) {
571 		chan->chan_qfreeze = 0;
572 	}
573 	splx(s);
574 	/*
575 	 * Kick the channel's queue here.  Note, we may be running in
576 	 * interrupt context (softclock or HBA's interrupt), so the adapter
577 	 * driver had better not sleep.
578 	 */
579 	if (chan->chan_qfreeze == 0)
580 		scsipi_run_queue(chan);
581 }
582 
583 /*
584  * scsipi_channel_timed_thaw:
585  *
586  *	Thaw a channel after some time has expired. This will also
587  * 	run the channel's queue if the freeze count has reached 0.
588  */
589 void
590 scsipi_channel_timed_thaw(void *arg)
591 {
592 	struct scsipi_channel *chan = arg;
593 
594 	scsipi_channel_thaw(chan, 1);
595 }
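
/*
 * Example (sketch, not from this file): an adapter that needs to back
 * off can freeze the channel and schedule the thaw from a callout it
 * owns (sc_thaw_ch below is a hypothetical softc member):
 *
 *	scsipi_channel_freeze(chan, 1);
 *	callout_reset(&sc->sc_thaw_ch, hz,
 *	    scsipi_channel_timed_thaw, chan);
 *
 * This mirrors the per-periph pattern in scsipi_complete(), which
 * uses periph_callout with scsipi_periph_timed_thaw().
 */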
596 
597 /*
598  * scsipi_periph_freeze:
599  *
600  *	Freeze a device's xfer queue.
601  */
602 void
603 scsipi_periph_freeze(struct scsipi_periph *periph, int count)
604 {
605 	int s;
606 
607 	s = splbio();
608 	periph->periph_qfreeze += count;
609 	splx(s);
610 }
611 
612 /*
613  * scsipi_periph_thaw:
614  *
615  *	Thaw a device's xfer queue.
616  */
617 void
618 scsipi_periph_thaw(struct scsipi_periph *periph, int count)
619 {
620 	int s;
621 
622 	s = splbio();
623 	periph->periph_qfreeze -= count;
624 #ifdef DIAGNOSTIC
625 	if (periph->periph_qfreeze < 0) {
626 		static const char pc[] = "periph freeze count < 0";
627 		scsipi_printaddr(periph);
628 		printf("%s\n", pc);
629 		panic(pc);
630 	}
631 #endif
632 	if (periph->periph_qfreeze == 0 &&
633 	    (periph->periph_flags & PERIPH_WAITING) != 0)
634 		wakeup(periph);
635 	splx(s);
636 }
637 
638 /*
639  * scsipi_periph_timed_thaw:
640  *
641  *	Thaw a device after some time has expired.
642  */
643 void
644 scsipi_periph_timed_thaw(void *arg)
645 {
646 	int s;
647 	struct scsipi_periph *periph = arg;
648 
649 	callout_stop(&periph->periph_callout);
650 
651 	s = splbio();
652 	scsipi_periph_thaw(periph, 1);
653 	if ((periph->periph_channel->chan_flags & SCSIPI_CHAN_TACTIVE) == 0) {
654 		/*
655 		 * Kick the channel's queue here.  Note, we're running in
656 		 * interrupt context (softclock), so the adapter driver
657 		 * had better not sleep.
658 		 */
659 		scsipi_run_queue(periph->periph_channel);
660 	} else {
661 		/*
662 		 * Tell the completion thread to kick the channel's queue here.
663 		 */
664 		periph->periph_channel->chan_tflags |= SCSIPI_CHANT_KICK;
665 		wakeup(&periph->periph_channel->chan_complete);
666 	}
667 	splx(s);
668 }
669 
670 /*
671  * scsipi_wait_drain:
672  *
673  *	Wait for a periph's pending xfers to drain.
674  */
675 void
676 scsipi_wait_drain(struct scsipi_periph *periph)
677 {
678 	int s;
679 
680 	s = splbio();
681 	while (periph->periph_active != 0) {
682 		periph->periph_flags |= PERIPH_WAITDRAIN;
683 		(void) tsleep(&periph->periph_active, PRIBIO, "sxdrn", 0);
684 	}
685 	splx(s);
686 }
687 
688 /*
689  * scsipi_kill_pending:
690  *
691  *	Kill off all pending xfers for a periph.
692  *
693  *	NOTE: Must be called at splbio().
694  */
695 void
696 scsipi_kill_pending(struct scsipi_periph *periph)
697 {
698 
699 	(*periph->periph_channel->chan_bustype->bustype_kill_pending)(periph);
700 	scsipi_wait_drain(periph);
701 }
702 
703 /*
704  * scsipi_print_cdb:
705  * prints a command descriptor block (for debugging purposes, error
706  * messages, SCSIPI_VERBOSE, ...)
707  */
708 void
709 scsipi_print_cdb(struct scsipi_generic *cmd)
710 {
711 	int i, j;
712 
713  	printf("0x%02x", cmd->opcode);
714 
715  	switch (CDB_GROUPID(cmd->opcode)) {
716  	case CDB_GROUPID_0:
717  		j = CDB_GROUP0;
718  		break;
719  	case CDB_GROUPID_1:
720  		j = CDB_GROUP1;
721  		break;
722  	case CDB_GROUPID_2:
723  		j = CDB_GROUP2;
724  		break;
725  	case CDB_GROUPID_3:
726  		j = CDB_GROUP3;
727  		break;
728  	case CDB_GROUPID_4:
729  		j = CDB_GROUP4;
730  		break;
731  	case CDB_GROUPID_5:
732  		j = CDB_GROUP5;
733  		break;
734  	case CDB_GROUPID_6:
735  		j = CDB_GROUP6;
736  		break;
737  	case CDB_GROUPID_7:
738  		j = CDB_GROUP7;
739  		break;
740  	default:
741  		j = 0;
742  	}
743  	if (j == 0)
744  		j = sizeof (cmd->bytes);
745  	for (i = 0; i < j-1; i++) /* already done the opcode */
746  		printf(" %02x", cmd->bytes[i]);
747 }
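
/*
 * Example: opcode 0x28 (READ(10)) carries group ID 1 in its top three
 * bits, so CDB_GROUPID(0x28) selects CDB_GROUP1 and the loop above
 * prints a 10-byte command block.
 */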
748 
749 /*
750  * scsipi_interpret_sense:
751  *
752  *	Look at the returned sense and act on the error, determining
753  *	the unix error number to pass back.  (0 = report no error)
754  *
755  *	NOTE: If we return ERESTART, we are expected to have
756  *	thawed the device!
757  *
758  *	THIS IS THE DEFAULT ERROR HANDLER FOR SCSI DEVICES.
759  */
760 int
761 scsipi_interpret_sense(struct scsipi_xfer *xs)
762 {
763 	struct scsi_sense_data *sense;
764 	struct scsipi_periph *periph = xs->xs_periph;
765 	u_int8_t key;
766 	int error;
767 #ifndef	SCSIVERBOSE
768 	u_int32_t info;
769 	static const char *error_mes[] = {
770 		"soft error (corrected)",
771 		"not ready", "medium error",
772 		"non-media hardware failure", "illegal request",
773 		"unit attention", "readonly device",
774 		"no data found", "vendor unique",
775 		"copy aborted", "command aborted",
776 		"search returned equal", "volume overflow",
777 		"verify miscompare", "unknown error key"
778 	};
779 #endif
780 
781 	sense = &xs->sense.scsi_sense;
782 #ifdef SCSIPI_DEBUG
783 	if (periph->periph_dbflags & SCSIPI_DB1) {
784 		int count;
785 		scsipi_printaddr(periph);
786 		printf(" sense debug information:\n");
787 		printf("\tcode 0x%x valid %d\n",
788 			SSD_RCODE(sense->response_code),
789 			sense->response_code & SSD_RCODE_VALID ? 1 : 0);
790 		printf("\tseg 0x%x key 0x%x ili 0x%x eom 0x%x fmark 0x%x\n",
791 			sense->segment,
792 			SSD_SENSE_KEY(sense->flags),
793 			sense->flags & SSD_ILI ? 1 : 0,
794 			sense->flags & SSD_EOM ? 1 : 0,
795 			sense->flags & SSD_FILEMARK ? 1 : 0);
796 		printf("\ninfo: 0x%x 0x%x 0x%x 0x%x followed by %d "
797 			"extra bytes\n",
798 			sense->info[0],
799 			sense->info[1],
800 			sense->info[2],
801 			sense->info[3],
802 			sense->extra_len);
803 		printf("\textra: ");
804 		for (count = 0; count < SSD_ADD_BYTES_LIM(sense); count++)
805 			printf("0x%x ", sense->csi[count]);
806 		printf("\n");
807 	}
808 #endif
809 
810 	/*
811 	 * If the periph has its own error handler, call it first.
812 	 * If it returns a legit error value, return that, otherwise
813 	 * it wants us to continue with normal error processing.
814 	 */
815 	if (periph->periph_switch->psw_error != NULL) {
816 		SC_DEBUG(periph, SCSIPI_DB2,
817 		    ("calling private err_handler()\n"));
818 		error = (*periph->periph_switch->psw_error)(xs);
819 		if (error != EJUSTRETURN)
820 			return (error);
821 	}
822 	/* otherwise use the default */
823 	switch (SSD_RCODE(sense->response_code)) {
824 
825 		/*
826 		 * Old SCSI-1 and SASI devices respond with
827 		 * codes other than 70.
828 		 */
829 	case 0x00:		/* no error (command completed OK) */
830 		return (0);
831 	case 0x04:		/* drive not ready after it was selected */
832 		if ((periph->periph_flags & PERIPH_REMOVABLE) != 0)
833 			periph->periph_flags &= ~PERIPH_MEDIA_LOADED;
834 		if ((xs->xs_control & XS_CTL_IGNORE_NOT_READY) != 0)
835 			return (0);
836 		/* XXX - display some sort of error here? */
837 		return (EIO);
838 	case 0x20:		/* invalid command */
839 		if ((xs->xs_control &
840 		     XS_CTL_IGNORE_ILLEGAL_REQUEST) != 0)
841 			return (0);
842 		return (EINVAL);
843 	case 0x25:		/* invalid LUN (Adaptec ACB-4000) */
844 		return (EACCES);
845 
846 		/*
847 		 * If it's code 70, use the extended stuff and
848 		 * interpret the key
849 		 */
850 	case 0x71:		/* delayed error */
851 		scsipi_printaddr(periph);
852 		key = SSD_SENSE_KEY(sense->flags);
853 		printf(" DEFERRED ERROR, key = 0x%x\n", key);
854 		/* FALLTHROUGH */
855 	case 0x70:
856 #ifndef	SCSIVERBOSE
857 		if ((sense->response_code & SSD_RCODE_VALID) != 0)
858 			info = _4btol(sense->info);
859 		else
860 			info = 0;
861 #endif
862 		key = SSD_SENSE_KEY(sense->flags);
863 
864 		switch (key) {
865 		case SKEY_NO_SENSE:
866 		case SKEY_RECOVERED_ERROR:
867 			if (xs->resid == xs->datalen && xs->datalen) {
868 				/*
869 				 * Why is this here?
870 				 */
871 				xs->resid = 0;	/* not short read */
872 			}
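		/* FALLTHROUGH */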
873 		case SKEY_EQUAL:
874 			error = 0;
875 			break;
876 		case SKEY_NOT_READY:
877 			if ((periph->periph_flags & PERIPH_REMOVABLE) != 0)
878 				periph->periph_flags &= ~PERIPH_MEDIA_LOADED;
879 			if ((xs->xs_control & XS_CTL_IGNORE_NOT_READY) != 0)
880 				return (0);
881 			if (sense->asc == 0x3A) {
882 				error = ENODEV; /* Medium not present */
883 				if (xs->xs_control & XS_CTL_SILENT_NODEV)
884 					return (error);
885 			} else
886 				error = EIO;
887 			if ((xs->xs_control & XS_CTL_SILENT) != 0)
888 				return (error);
889 			break;
890 		case SKEY_ILLEGAL_REQUEST:
891 			if ((xs->xs_control &
892 			     XS_CTL_IGNORE_ILLEGAL_REQUEST) != 0)
893 				return (0);
894 			/*
895 			 * Handle the case where a device reports
896 			 * Logical Unit Not Supported during discovery.
897 			 */
898 			if ((xs->xs_control & XS_CTL_DISCOVERY) != 0 &&
899 			    sense->asc == 0x25 &&
900 			    sense->ascq == 0x00)
901 				return (EINVAL);
902 			if ((xs->xs_control & XS_CTL_SILENT) != 0)
903 				return (EIO);
904 			error = EINVAL;
905 			break;
906 		case SKEY_UNIT_ATTENTION:
907 			if (sense->asc == 0x29 &&
908 			    sense->ascq == 0x00) {
909 				/* device or bus reset */
910 				return (ERESTART);
911 			}
912 			if ((periph->periph_flags & PERIPH_REMOVABLE) != 0)
913 				periph->periph_flags &= ~PERIPH_MEDIA_LOADED;
914 			if ((xs->xs_control &
915 			     XS_CTL_IGNORE_MEDIA_CHANGE) != 0 ||
916 				/* XXX Should reupload any transient state. */
917 				(periph->periph_flags &
918 				 PERIPH_REMOVABLE) == 0) {
919 				return (ERESTART);
920 			}
921 			if ((xs->xs_control & XS_CTL_SILENT) != 0)
922 				return (EIO);
923 			error = EIO;
924 			break;
925 		case SKEY_DATA_PROTECT:
926 			error = EROFS;
927 			break;
928 		case SKEY_BLANK_CHECK:
929 			error = 0;
930 			break;
931 		case SKEY_ABORTED_COMMAND:
932 			if (xs->xs_retries != 0) {
933 				xs->xs_retries--;
934 				error = ERESTART;
935 			} else
936 				error = EIO;
937 			break;
938 		case SKEY_VOLUME_OVERFLOW:
939 			error = ENOSPC;
940 			break;
941 		default:
942 			error = EIO;
943 			break;
944 		}
945 
946 #ifdef SCSIVERBOSE
947 		if (key && (xs->xs_control & XS_CTL_SILENT) == 0)
948 			scsipi_print_sense(xs, 0);
949 #else
950 		if (key) {
951 			scsipi_printaddr(periph);
952 			printf("%s", error_mes[key - 1]);
953 			if ((sense->response_code & SSD_RCODE_VALID) != 0) {
954 				switch (key) {
955 				case SKEY_NOT_READY:
956 				case SKEY_ILLEGAL_REQUEST:
957 				case SKEY_UNIT_ATTENTION:
958 				case SKEY_DATA_PROTECT:
959 					break;
960 				case SKEY_BLANK_CHECK:
961 					printf(", requested size: %d (decimal)",
962 					    info);
963 					break;
964 				case SKEY_ABORTED_COMMAND:
965 					if (xs->xs_retries)
966 						printf(", retrying");
967 					printf(", cmd 0x%x, info 0x%x",
968 					    xs->cmd->opcode, info);
969 					break;
970 				default:
971 					printf(", info = %d (decimal)", info);
972 				}
973 			}
974 			if (sense->extra_len != 0) {
975 				int n;
976 				printf(", data =");
977 				for (n = 0; n < sense->extra_len; n++)
978 					printf(" %02x",
979 					    sense->csi[n]);
980 			}
981 			printf("\n");
982 		}
983 #endif
984 		return (error);
985 
986 	/*
987 	 * Some other code, just report it
988 	 */
989 	default:
990 #if    defined(SCSIDEBUG) || defined(DEBUG)
991 	{
992 		static const char *uc = "undecodable sense error";
993 		int i;
994 		u_int8_t *cptr = (u_int8_t *) sense;
995 		scsipi_printaddr(periph);
996 		if (xs->cmd == &xs->cmdstore) {
997 			printf("%s for opcode 0x%x, data=",
998 			    uc, xs->cmdstore.opcode);
999 		} else {
1000 			printf("%s, data=", uc);
1001 		}
1002 		for (i = 0; i < sizeof (*sense); i++)
1003 			printf(" 0x%02x", *(cptr++) & 0xff);
1004 		printf("\n");
1005 	}
1006 #else
1007 		scsipi_printaddr(periph);
1008 		printf("Sense Error Code 0x%x",
1009 			SSD_RCODE(sense->response_code));
1010 		if ((sense->response_code & SSD_RCODE_VALID) != 0) {
1011 			struct scsi_sense_data_unextended *usense =
1012 			    (struct scsi_sense_data_unextended *)sense;
1013 			printf(" at block no. %d (decimal)",
1014 			    _3btol(usense->block));
1015 		}
1016 		printf("\n");
1017 #endif
1018 		return (EIO);
1019 	}
1020 }
1021 
1022 /*
1023  * scsipi_size:
1024  *
1025  *	Find out from the device what its capacity is.
1026  */
1027 u_int64_t
1028 scsipi_size(struct scsipi_periph *periph, int flags)
1029 {
1030 	union {
1031 		struct scsipi_read_capacity_10 cmd;
1032 		struct scsipi_read_capacity_16 cmd16;
1033 	} cmd;
1034 	union {
1035 		struct scsipi_read_capacity_10_data data;
1036 		struct scsipi_read_capacity_16_data data16;
1037 	} data;
1038 
1039 	memset(&cmd, 0, sizeof(cmd));
1040 	cmd.cmd.opcode = READ_CAPACITY_10;
1041 
1042 	/*
1043 	 * If the command works, interpret the result as a 4 byte
1044 	 * number of blocks
1045 	 */
1046 	if (scsipi_command(periph, (void *)&cmd.cmd, sizeof(cmd.cmd),
1047 	    (void *)&data.data, sizeof(data.data), SCSIPIRETRIES, 20000, NULL,
1048 	    flags | XS_CTL_DATA_IN | XS_CTL_DATA_ONSTACK | XS_CTL_SILENT) != 0)
1049 		return (0);
1050 
1051 	if (_4btol(data.data.addr) != 0xffffffff)
1052 		return (_4btol(data.data.addr) + 1);
1053 
1054 	/*
1055 	 * Device is larger than can be reflected by READ CAPACITY (10).
1056 	 * Try READ CAPACITY (16).
1057 	 */
1058 
1059 	memset(&cmd, 0, sizeof(cmd));
1060 	cmd.cmd16.opcode = READ_CAPACITY_16;
1061 	cmd.cmd16.byte2 = SRC16_SERVICE_ACTION;
1062 	_lto4b(sizeof(data.data16), cmd.cmd16.len);
1063 
1064 	if (scsipi_command(periph, (void *)&cmd.cmd16, sizeof(cmd.cmd16),
1065 	    (void *)&data.data16, sizeof(data.data16), SCSIPIRETRIES, 20000,
1066 	    NULL,
1067 	    flags | XS_CTL_DATA_IN | XS_CTL_DATA_ONSTACK | XS_CTL_SILENT) != 0)
1068 		return (0);
1069 
1070 	return (_8btol(data.data16.addr) + 1);
1071 }
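
/*
 * Usage sketch (hedged): callers multiply the returned block count by
 * the device's block size, which must be obtained separately:
 *
 *	u_int64_t blks = scsipi_size(periph, XS_CTL_DISCOVERY);
 *	if (blks == 0)
 *		... neither READ CAPACITY (10) nor (16) succeeded ...
 */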
1072 
1073 /*
1074  * scsipi_test_unit_ready:
1075  *
1076  *	Issue a `test unit ready' request.
1077  */
1078 int
1079 scsipi_test_unit_ready(struct scsipi_periph *periph, int flags)
1080 {
1081 	struct scsi_test_unit_ready cmd;
1082 	int retries;
1083 
1084 	/* some ATAPI drives don't support TEST UNIT READY. Sigh */
1085 	if (periph->periph_quirks & PQUIRK_NOTUR)
1086 		return (0);
1087 
1088 	if (flags & XS_CTL_DISCOVERY)
1089 		retries = 0;
1090 	else
1091 		retries = SCSIPIRETRIES;
1092 
1093 	memset(&cmd, 0, sizeof(cmd));
1094 	cmd.opcode = SCSI_TEST_UNIT_READY;
1095 
1096 	return (scsipi_command(periph, (void *)&cmd, sizeof(cmd), 0, 0,
1097 	    retries, 10000, NULL, flags));
1098 }
1099 
1100 /*
1101  * scsipi_inquire:
1102  *
1103  *	Ask the device about itself.
1104  */
1105 int
1106 scsipi_inquire(struct scsipi_periph *periph, struct scsipi_inquiry_data *inqbuf,
1107     int flags)
1108 {
1109 	struct scsipi_inquiry cmd;
1110 	int error;
1111 	int retries;
1112 
1113 	if (flags & XS_CTL_DISCOVERY)
1114 		retries = 0;
1115 	else
1116 		retries = SCSIPIRETRIES;
1117 
1118 	/*
1119 	 * If we request more data than the device can provide, it SHOULD just
1120 	 * return a short response.  However, some devices error with an
1121 	 * ILLEGAL REQUEST sense code, and yet others have even more special
1122 	 * failure modes (such as the GL641USB flash adapter, which goes loony
1123 	 * and sends corrupted CRCs).  To work around this, and to bring our
1124 	 * behavior more in line with other OSes, we do a shorter inquiry,
1125 	 * covering all the SCSI-2 information, first, and then request more
1126 	 * data iff the "additional length" field indicates there is more.
1127 	 * - mycroft, 2003/10/16
1128 	 */
1129 	memset(&cmd, 0, sizeof(cmd));
1130 	cmd.opcode = INQUIRY;
1131 	cmd.length = SCSIPI_INQUIRY_LENGTH_SCSI2;
1132 	error = scsipi_command(periph, (void *)&cmd, sizeof(cmd),
1133 	    (void *)inqbuf, SCSIPI_INQUIRY_LENGTH_SCSI2, retries,
1134 	    10000, NULL, flags | XS_CTL_DATA_IN);
1135 	if (!error &&
1136 	    inqbuf->additional_length > SCSIPI_INQUIRY_LENGTH_SCSI2 - 4) {
1137 #if 0
1138 printf("inquire: addlen=%d, retrying\n", inqbuf->additional_length);
1139 #endif
1140 		cmd.length = SCSIPI_INQUIRY_LENGTH_SCSI3;
1141 		error = scsipi_command(periph, (void *)&cmd, sizeof(cmd),
1142 		    (void *)inqbuf, SCSIPI_INQUIRY_LENGTH_SCSI3, retries,
1143 		    10000, NULL, flags | XS_CTL_DATA_IN);
1144 #if 0
1145 printf("inquire: error=%d\n", error);
1146 #endif
1147 	}
1148 
1149 #ifdef SCSI_OLD_NOINQUIRY
1150 	/*
1151 	 * Kludge for the Adaptec ACB-4000 SCSI->MFM translator.
1152 	 * This board doesn't support the INQUIRY command at all.
1153 	 */
1154 	if (error == EINVAL || error == EACCES) {
1155 		/*
1156 		 * Conjure up an INQUIRY response.
1157 		 */
1158 		inqbuf->device = (error == EINVAL ?
1159 			 SID_QUAL_LU_PRESENT :
1160 			 SID_QUAL_LU_NOTPRESENT) | T_DIRECT;
1161 		inqbuf->dev_qual2 = 0;
1162 		inqbuf->version = 0;
1163 		inqbuf->response_format = SID_FORMAT_SCSI1;
1164 		inqbuf->additional_length = SCSIPI_INQUIRY_LENGTH_SCSI2 - 4;
1165 		inqbuf->flags1 = inqbuf->flags2 = inqbuf->flags3 = 0;
1166 		memcpy(inqbuf->vendor, "ADAPTEC ACB-4000            ", 28);
1167 		error = 0;
1168 	}
1169 
1170 	/*
1171 	 * Kludge for the Emulex MT-02 SCSI->QIC translator.
1172 	 * This board gives an empty response to an INQUIRY command.
1173 	 */
1174 	else if (error == 0 &&
1175 	    inqbuf->device == (SID_QUAL_LU_PRESENT | T_DIRECT) &&
1176 	    inqbuf->dev_qual2 == 0 &&
1177 	    inqbuf->version == 0 &&
1178 	    inqbuf->response_format == SID_FORMAT_SCSI1) {
1179 		/*
1180 		 * Fill out the INQUIRY response.
1181 		 */
1182 		inqbuf->device = (SID_QUAL_LU_PRESENT | T_SEQUENTIAL);
1183 		inqbuf->dev_qual2 = SID_REMOVABLE;
1184 		inqbuf->additional_length = SCSIPI_INQUIRY_LENGTH_SCSI2 - 4;
1185 		inqbuf->flags1 = inqbuf->flags2 = inqbuf->flags3 = 0;
1186 		memcpy(inqbuf->vendor, "EMULEX  MT-02 QIC           ", 28);
1187 	}
1188 #endif /* SCSI_OLD_NOINQUIRY */
1189 
1190 	return error;
1191 }
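
/*
 * Probe sketch (hedged): bus scan code typically issues TEST UNIT
 * READY and then INQUIRY with XS_CTL_DISCOVERY set, so a missing LUN
 * fails fast instead of going through the full retry machinery:
 *
 *	(void) scsipi_test_unit_ready(periph, XS_CTL_DISCOVERY);
 *	error = scsipi_inquire(periph, &inqbuf,
 *	    XS_CTL_DISCOVERY | XS_CTL_SILENT);
 */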
1192 
1193 /*
1194  * scsipi_prevent:
1195  *
1196  *	Prevent or allow the user to remove the media
1197  */
1198 int
1199 scsipi_prevent(struct scsipi_periph *periph, int type, int flags)
1200 {
1201 	struct scsi_prevent_allow_medium_removal cmd;
1202 
1203 	memset(&cmd, 0, sizeof(cmd));
1204 	cmd.opcode = SCSI_PREVENT_ALLOW_MEDIUM_REMOVAL;
1205 	cmd.how = type;
1206 
1207 	return (scsipi_command(periph, (void *)&cmd, sizeof(cmd), 0, 0,
1208 	    SCSIPIRETRIES, 5000, NULL, flags));
1209 }
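
/*
 * Example (sketch): locking the door while a removable volume is in
 * use and releasing it afterwards; the SPAMR_* values are assumed to
 * come from scsi_spc.h:
 *
 *	(void) scsipi_prevent(periph, SPAMR_PREVENT_DT, 0);
 *	...
 *	(void) scsipi_prevent(periph, SPAMR_ALLOW, 0);
 */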
1210 
1211 /*
1212  * scsipi_start:
1213  *
1214  *	Send a START UNIT.
1215  */
1216 int
1217 scsipi_start(struct scsipi_periph *periph, int type, int flags)
1218 {
1219 	struct scsipi_start_stop cmd;
1220 
1221 	memset(&cmd, 0, sizeof(cmd));
1222 	cmd.opcode = START_STOP;
1223 	cmd.byte2 = 0x00;
1224 	cmd.how = type;
1225 
1226 	return (scsipi_command(periph, (void *)&cmd, sizeof(cmd), 0, 0,
1227 	    SCSIPIRETRIES, (type & SSS_START) ? 60000 : 10000, NULL, flags));
1228 }
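
/*
 * Example (sketch): spinning a disk up before use, or stopping and
 * ejecting removable media (SSS_STOP and SSS_LOEJ are assumed from
 * scsipi_all.h):
 *
 *	error = scsipi_start(periph, SSS_START, 0);
 *	error = scsipi_start(periph, SSS_STOP | SSS_LOEJ, 0);
 *
 * Note the longer 60 second timeout used above when SSS_START is set,
 * since spin-up can take a while.
 */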
1229 
1230 /*
1231  * scsipi_mode_sense, scsipi_mode_sense_big:
1232  *	get a sense page from a device
1233  */
1234 
1235 int
1236 scsipi_mode_sense(struct scsipi_periph *periph, int byte2, int page,
1237     struct scsi_mode_parameter_header_6 *data, int len, int flags, int retries,
1238     int timeout)
1239 {
1240 	struct scsi_mode_sense_6 cmd;
1241 
1242 	memset(&cmd, 0, sizeof(cmd));
1243 	cmd.opcode = SCSI_MODE_SENSE_6;
1244 	cmd.byte2 = byte2;
1245 	cmd.page = page;
1246 	cmd.length = len & 0xff;
1247 
1248 	return (scsipi_command(periph, (void *)&cmd, sizeof(cmd),
1249 	    (void *)data, len, retries, timeout, NULL, flags | XS_CTL_DATA_IN));
1250 }
1251 
1252 int
1253 scsipi_mode_sense_big(struct scsipi_periph *periph, int byte2, int page,
1254     struct scsi_mode_parameter_header_10 *data, int len, int flags, int retries,
1255     int timeout)
1256 {
1257 	struct scsi_mode_sense_10 cmd;
1258 
1259 	memset(&cmd, 0, sizeof(cmd));
1260 	cmd.opcode = SCSI_MODE_SENSE_10;
1261 	cmd.byte2 = byte2;
1262 	cmd.page = page;
1263 	_lto2b(len, cmd.length);
1264 
1265 	return (scsipi_command(periph, (void *)&cmd, sizeof(cmd),
1266 	    (void *)data, len, retries, timeout, NULL, flags | XS_CTL_DATA_IN));
1267 }
1268 
1269 int
1270 scsipi_mode_select(struct scsipi_periph *periph, int byte2,
1271     struct scsi_mode_parameter_header_6 *data, int len, int flags, int retries,
1272     int timeout)
1273 {
1274 	struct scsi_mode_select_6 cmd;
1275 
1276 	memset(&cmd, 0, sizeof(cmd));
1277 	cmd.opcode = SCSI_MODE_SELECT_6;
1278 	cmd.byte2 = byte2;
1279 	cmd.length = len & 0xff;
1280 
1281 	return (scsipi_command(periph, (void *)&cmd, sizeof(cmd),
1282 	    (void *)data, len, retries, timeout, NULL, flags | XS_CTL_DATA_OUT));
1283 }
1284 
1285 int
1286 scsipi_mode_select_big(struct scsipi_periph *periph, int byte2,
1287     struct scsi_mode_parameter_header_10 *data, int len, int flags, int retries,
1288     int timeout)
1289 {
1290 	struct scsi_mode_select_10 cmd;
1291 
1292 	memset(&cmd, 0, sizeof(cmd));
1293 	cmd.opcode = SCSI_MODE_SELECT_10;
1294 	cmd.byte2 = byte2;
1295 	_lto2b(len, cmd.length);
1296 
1297 	return (scsipi_command(periph, (void *)&cmd, sizeof(cmd),
1298 	    (void *)data, len, retries, timeout, NULL, flags | XS_CTL_DATA_OUT));
1299 }
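
/*
 * Example (sketch): a mode page is fetched as header plus page data
 * in one buffer; the struct below is hypothetical:
 *
 *	struct {
 *		struct scsi_mode_parameter_header_6 hdr;
 *		u_int8_t page[0x20];
 *	} mp;
 *
 *	error = scsipi_mode_sense(periph, 0, page_code, &mp.hdr,
 *	    sizeof(mp), flags, SCSIPIRETRIES, 10000);
 *
 * The header's block descriptor length then locates the start of the
 * page data within the buffer.
 */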
1300 
1301 /*
1302  * scsipi_done:
1303  *
1304  *	This routine is called by an adapter's interrupt handler when
1305  *	an xfer is completed.
1306  */
1307 void
1308 scsipi_done(struct scsipi_xfer *xs)
1309 {
1310 	struct scsipi_periph *periph = xs->xs_periph;
1311 	struct scsipi_channel *chan = periph->periph_channel;
1312 	int s, freezecnt;
1313 
1314 	SC_DEBUG(periph, SCSIPI_DB2, ("scsipi_done\n"));
1315 #ifdef SCSIPI_DEBUG
1316 	if (periph->periph_dbflags & SCSIPI_DB1)
1317 		show_scsipi_cmd(xs);
1318 #endif
1319 
1320 	s = splbio();
1321 	/*
1322 	 * The resource this command was using is now free.
1323 	 */
1324 	if (xs->xs_status & XS_STS_DONE) {
1325 		/* XXX in certain circumstances, such as a device
1326 		 * being detached, an xs that has already been
1327 		 * scsipi_done()'d by the main thread will be done'd
1328 		 * again by scsibusdetach(). Putting the xs on the
1329 		 * chan_complete queue causes list corruption and
1330 		 * everyone dies. This prevents that, but perhaps
1331 		 * there should be better coordination somewhere such
1332 		 * that this won't ever happen (and can be turned into
1333 		 * a KASSERT()).
1334 		 */
1335 		splx(s);
1336 		goto out;
1337 	}
1338 	scsipi_put_resource(chan);
1339 	xs->xs_periph->periph_sent--;
1340 
1341 	/*
1342 	 * If the command was tagged, free the tag.
1343 	 */
1344 	if (XS_CTL_TAGTYPE(xs) != 0)
1345 		scsipi_put_tag(xs);
1346 	else
1347 		periph->periph_flags &= ~PERIPH_UNTAG;
1348 
1349 	/* Mark the command as `done'. */
1350 	xs->xs_status |= XS_STS_DONE;
1351 
1352 #ifdef DIAGNOSTIC
1353 	if ((xs->xs_control & (XS_CTL_ASYNC|XS_CTL_POLL)) ==
1354 	    (XS_CTL_ASYNC|XS_CTL_POLL))
1355 		panic("scsipi_done: ASYNC and POLL");
1356 #endif
1357 
1358 	/*
1359 	 * If the xfer had an error of any sort, freeze the
1360 	 * periph's queue.  Freeze it again if we were requested
1361 	 * to do so in the xfer.
1362 	 */
1363 	freezecnt = 0;
1364 	if (xs->error != XS_NOERROR)
1365 		freezecnt++;
1366 	if (xs->xs_control & XS_CTL_FREEZE_PERIPH)
1367 		freezecnt++;
1368 	if (freezecnt != 0)
1369 		scsipi_periph_freeze(periph, freezecnt);
1370 
1371 	/*
1372 	 * Record the xfer with a pending sense, in case a SCSI reset is
1373 	 * received before the thread is woken up.
1374 	 */
1375 	if (xs->error == XS_BUSY && xs->status == SCSI_CHECK) {
1376 		periph->periph_flags |= PERIPH_SENSE;
1377 		periph->periph_xscheck = xs;
1378 	}
1379 
1380 	/*
1381 	 * If this was an xfer that was not to complete asynchronously,
1382 	 * let the requesting thread perform error checking/handling
1383 	 * in its context.
1384 	 */
1385 	if ((xs->xs_control & XS_CTL_ASYNC) == 0) {
1386 		splx(s);
1387 		/*
1388 		 * If it's a polling job, just return, to unwind the
1389 		 * call graph.  We don't need to restart the queue,
1390 		 * because polling jobs are treated specially, and
1391 		 * are really only used during crash dumps anyway
1392 		 * (XXX or during boot-time autoconfiguration of
1393 		 * ATAPI devices).
1394 		 */
1395 		if (xs->xs_control & XS_CTL_POLL)
1396 			return;
1397 		wakeup(xs);
1398 		goto out;
1399 	}
1400 
1401 	/*
1402 	 * Catch the extremely common case of I/O completing
1403 	 * without error; no use in taking a context switch
1404 	 * if we can handle it in interrupt context.
1405 	 */
1406 	if (xs->error == XS_NOERROR) {
1407 		splx(s);
1408 		(void) scsipi_complete(xs);
1409 		goto out;
1410 	}
1411 
1412 	/*
1413 	 * There is an error on this xfer.  Put it on the channel's
1414 	 * completion queue, and wake up the completion thread.
1415 	 */
1416 	TAILQ_INSERT_TAIL(&chan->chan_complete, xs, channel_q);
1417 	splx(s);
1418 	wakeup(&chan->chan_complete);
1419 
1420  out:
1421 	/*
1422 	 * If there are more xfers on the channel's queue, attempt to
1423 	 * run them.
1424 	 */
1425 	scsipi_run_queue(chan);
1426 }
1427 
1428 /*
1429  * scsipi_complete:
1430  *
1431  *	Completion of a scsipi_xfer.  This is the guts of scsipi_done().
1432  *
1433  *	NOTE: This routine MUST be called with valid thread context
1434  *	except for the case where the following two conditions are
1435  *	true:
1436  *
1437  *		xs->error == XS_NOERROR
1438  *		XS_CTL_ASYNC is set in xs->xs_control
1439  *
1440  *	The semantics of this routine can be tricky, so here is an
1441  *	explanation:
1442  *
1443  *		0		Xfer completed successfully.
1444  *
1445  *		ERESTART	Xfer had an error, but was restarted.
1446  *
1447  *		anything else	Xfer had an error, return value is Unix
1448  *				errno.
1449  *
1450  *	If the return value is anything but ERESTART:
1451  *
1452  *		- If XS_CTL_ASYNC is set, `xs' has been freed back to
1453  *		  the pool.
1454  *		- If there is a buf associated with the xfer,
1455  *		  it has been biodone()'d.
1456  */
1457 static int
1458 scsipi_complete(struct scsipi_xfer *xs)
1459 {
1460 	struct scsipi_periph *periph = xs->xs_periph;
1461 	struct scsipi_channel *chan = periph->periph_channel;
1462 	int error, s;
1463 
1464 #ifdef DIAGNOSTIC
1465 	if ((xs->xs_control & XS_CTL_ASYNC) != 0 && xs->bp == NULL)
1466 		panic("scsipi_complete: XS_CTL_ASYNC but no buf");
1467 #endif
1468 	/*
1469 	 * If command terminated with a CHECK CONDITION, we need to issue a
1470 	 * REQUEST_SENSE command. Once the REQUEST_SENSE has been processed
1471 	 * we'll have the real status.
1472 	 * Must be processed at splbio() to avoid missing a SCSI bus reset
1473 	 * for this command.
1474 	 */
1475 	s = splbio();
1476 	if (xs->error == XS_BUSY && xs->status == SCSI_CHECK) {
1477 		/* request sense for a request sense? */
1478 		if (xs->xs_control & XS_CTL_REQSENSE) {
1479 			scsipi_printaddr(periph);
1480 			printf("request sense for a request sense?\n");
1481 			/* XXX maybe we should reset the device? */
1482 			/* we've been frozen because xs->error != XS_NOERROR */
1483 			scsipi_periph_thaw(periph, 1);
1484 			splx(s);
1485 			if (xs->resid < xs->datalen) {
1486 				printf("we read %d bytes of sense anyway:\n",
1487 				    xs->datalen - xs->resid);
1488 #ifdef SCSIVERBOSE
1489 				scsipi_print_sense_data((void *)xs->data, 0);
1490 #endif
1491 			}
1492 			return EINVAL;
1493 		}
1494 		scsipi_request_sense(xs);
1495 	}
1496 	splx(s);
1497 
1498 	/*
1499 	 * If it's a user level request, bypass all usual completion
1500 	 * processing; let the user work it out.
1501 	 */
1502 	if ((xs->xs_control & XS_CTL_USERCMD) != 0) {
1503 		SC_DEBUG(periph, SCSIPI_DB3, ("calling user done()\n"));
1504 		if (xs->error != XS_NOERROR)
1505 			scsipi_periph_thaw(periph, 1);
1506 		scsipi_user_done(xs);
1507 		SC_DEBUG(periph, SCSIPI_DB3, ("returned from user done()\n "));
1508 		return 0;
1509 	}
1510 
1511 	switch (xs->error) {
1512 	case XS_NOERROR:
1513 		error = 0;
1514 		break;
1515 
1516 	case XS_SENSE:
1517 	case XS_SHORTSENSE:
1518 		error = (*chan->chan_bustype->bustype_interpret_sense)(xs);
1519 		break;
1520 
1521 	case XS_RESOURCE_SHORTAGE:
1522 		/*
1523 		 * XXX Should freeze channel's queue.
1524 		 */
1525 		scsipi_printaddr(periph);
1526 		printf("adapter resource shortage\n");
1527 		/* FALLTHROUGH */
1528 
1529 	case XS_BUSY:
1530 		if (xs->error == XS_BUSY && xs->status == SCSI_QUEUE_FULL) {
1531 			struct scsipi_max_openings mo;
1532 
1533 			/*
1534 			 * We set the openings to active - 1, assuming that
1535 			 * the command that got us here is the first one that
1536 			 * can't fit into the device's queue.  If that's not
1537 			 * the case, I guess we'll find out soon enough.
1538 			 */
1539 			mo.mo_target = periph->periph_target;
1540 			mo.mo_lun = periph->periph_lun;
1541 			if (periph->periph_active < periph->periph_openings)
1542 				mo.mo_openings = periph->periph_active - 1;
1543 			else
1544 				mo.mo_openings = periph->periph_openings - 1;
1545 #ifdef DIAGNOSTIC
1546 			if (mo.mo_openings < 0) {
1547 				scsipi_printaddr(periph);
1548 				printf("QUEUE FULL resulted in < 0 openings\n");
1549 				panic("scsipi_done");
1550 			}
1551 #endif
1552 			if (mo.mo_openings == 0) {
1553 				scsipi_printaddr(periph);
1554 				printf("QUEUE FULL resulted in 0 openings\n");
1555 				mo.mo_openings = 1;
1556 			}
1557 			scsipi_async_event(chan, ASYNC_EVENT_MAX_OPENINGS, &mo);
1558 			error = ERESTART;
1559 		} else if (xs->xs_retries != 0) {
1560 			xs->xs_retries--;
1561 			/*
1562 			 * Wait one second, and try again.
1563 			 */
1564 			if ((xs->xs_control & XS_CTL_POLL) ||
1565 			    (chan->chan_flags & SCSIPI_CHAN_TACTIVE) == 0) {
1566 				delay(1000000);
1567 			} else if (!callout_pending(&periph->periph_callout)) {
1568 				scsipi_periph_freeze(periph, 1);
1569 				callout_reset(&periph->periph_callout,
1570 				    hz, scsipi_periph_timed_thaw, periph);
1571 			}
1572 			error = ERESTART;
1573 		} else
1574 			error = EBUSY;
1575 		break;
1576 
1577 	case XS_REQUEUE:
1578 		error = ERESTART;
1579 		break;
1580 
1581 	case XS_SELTIMEOUT:
1582 	case XS_TIMEOUT:
1583 		/*
1584 		 * If the device hasn't gone away, honor retry counts.
1585 		 *
1586 		 * Note that if we're in the middle of probing it,
1587 		 * it won't be found because it isn't here yet, so
1588 		 * we won't honor the retry count in that case.
1589 		 */
1590 		if (scsipi_lookup_periph(chan, periph->periph_target,
1591 		    periph->periph_lun) && xs->xs_retries != 0) {
1592 			xs->xs_retries--;
1593 			error = ERESTART;
1594 		} else
1595 			error = EIO;
1596 		break;
1597 
1598 	case XS_RESET:
1599 		if (xs->xs_control & XS_CTL_REQSENSE) {
1600 			/*
1601 			 * request sense interrupted by reset: signal it
1602 			 * with EINTR return code.
1603 			 */
1604 			error = EINTR;
1605 		} else {
1606 			if (xs->xs_retries != 0) {
1607 				xs->xs_retries--;
1608 				error = ERESTART;
1609 			} else
1610 				error = EIO;
1611 		}
1612 		break;
1613 
1614 	case XS_DRIVER_STUFFUP:
1615 		scsipi_printaddr(periph);
1616 		printf("generic HBA error\n");
1617 		error = EIO;
1618 		break;
1619 	default:
1620 		scsipi_printaddr(periph);
1621 		printf("invalid return code from adapter: %d\n", xs->error);
1622 		error = EIO;
1623 		break;
1624 	}
1625 
1626 	s = splbio();
1627 	if (error == ERESTART) {
1628 		/*
1629 		 * If we get here, the periph has been thawed and frozen
1630 		 * again if we had to issue recovery commands.  Alternatively,
1631 		 * it may have been frozen again and in a timed thaw.  In
1632 		 * any case, we thaw the periph once we re-enqueue the
1633 		 * command.  Once the periph is fully thawed, it will begin
1634 		 * operation again.
1635 		 */
1636 		xs->error = XS_NOERROR;
1637 		xs->status = SCSI_OK;
1638 		xs->xs_status &= ~XS_STS_DONE;
1639 		xs->xs_requeuecnt++;
1640 		error = scsipi_enqueue(xs);
1641 		if (error == 0) {
1642 			scsipi_periph_thaw(periph, 1);
1643 			splx(s);
1644 			return (ERESTART);
1645 		}
1646 	}
1647 
1648 	/*
1649 	 * scsipi_done() freezes the queue if not XS_NOERROR.
1650 	 * Thaw it here.
1651 	 */
1652 	if (xs->error != XS_NOERROR)
1653 		scsipi_periph_thaw(periph, 1);
1654 
1655 	if (periph->periph_switch->psw_done)
1656 		periph->periph_switch->psw_done(xs, error);
1657 
1658 	if (xs->xs_control & XS_CTL_ASYNC)
1659 		scsipi_put_xs(xs);
1660 	splx(s);
1661 
1662 	return (error);
1663 }
1664 
1665 /*
1666  * Issue a request sense for the given scsipi_xfer. Called when the xfer
1667  * returns with a CHECK_CONDITION status. Must be called in valid thread
1668  * context and at splbio().
1669  */
1670 
1671 static void
1672 scsipi_request_sense(struct scsipi_xfer *xs)
1673 {
1674 	struct scsipi_periph *periph = xs->xs_periph;
1675 	int flags, error;
1676 	struct scsi_request_sense cmd;
1677 
1678 	periph->periph_flags |= PERIPH_SENSE;
1679 
1680 	/* if command was polling, request sense will too */
1681 	flags = xs->xs_control & XS_CTL_POLL;
1682 	/* Polling commands can't sleep */
1683 	if (flags)
1684 		flags |= XS_CTL_NOSLEEP;
1685 
1686 	flags |= XS_CTL_REQSENSE | XS_CTL_URGENT | XS_CTL_DATA_IN |
1687 	    XS_CTL_THAW_PERIPH | XS_CTL_FREEZE_PERIPH;
1688 
1689 	memset(&cmd, 0, sizeof(cmd));
1690 	cmd.opcode = SCSI_REQUEST_SENSE;
1691 	cmd.length = sizeof(struct scsi_sense_data);
1692 
1693 	error = scsipi_command(periph, (void *)&cmd, sizeof(cmd),
1694 	    (void *)&xs->sense.scsi_sense, sizeof(struct scsi_sense_data),
1695 	    0, 1000, NULL, flags);
1696 	periph->periph_flags &= ~PERIPH_SENSE;
1697 	periph->periph_xscheck = NULL;
1698 	switch (error) {
1699 	case 0:
1700 		/* we have a valid sense */
1701 		xs->error = XS_SENSE;
1702 		return;
1703 	case EINTR:
1704 		/* REQUEST_SENSE interrupted by bus reset. */
1705 		xs->error = XS_RESET;
1706 		return;
1707 	case EIO:
1708 		/* request sense couldn't be performed */
1709 		/*
1710 		 * XXX this isn't quite right but we don't have anything
1711 		 * better for now
1712 		 */
1713 		xs->error = XS_DRIVER_STUFFUP;
1714 		return;
1715 	default:
1716 		 /* Notify that request sense failed. */
1717 		xs->error = XS_DRIVER_STUFFUP;
1718 		scsipi_printaddr(periph);
1719 		printf("request sense failed with error %d\n", error);
1720 		return;
1721 	}
1722 }
1723 
1724 /*
1725  * scsipi_enqueue:
1726  *
1727  *	Enqueue an xfer on a channel.
1728  */
1729 static int
1730 scsipi_enqueue(struct scsipi_xfer *xs)
1731 {
1732 	struct scsipi_channel *chan = xs->xs_periph->periph_channel;
1733 	struct scsipi_xfer *qxs;
1734 	int s;
1735 
1736 	s = splbio();
1737 
1738 	/*
1739 	 * If the xfer is to be polled, and there are already jobs on
1740 	 * the queue, we can't proceed.
1741 	 */
1742 	if ((xs->xs_control & XS_CTL_POLL) != 0 &&
1743 	    TAILQ_FIRST(&chan->chan_queue) != NULL) {
1744 		splx(s);
1745 		xs->error = XS_DRIVER_STUFFUP;
1746 		return (EAGAIN);
1747 	}
1748 
1749 	/*
1750 	 * If we have an URGENT xfer, it's an error recovery command
1751 	 * and it should just go on the head of the channel's queue.
1752 	 */
1753 	if (xs->xs_control & XS_CTL_URGENT) {
1754 		TAILQ_INSERT_HEAD(&chan->chan_queue, xs, channel_q);
1755 		goto out;
1756 	}
1757 
1758 	/*
1759 	 * If this xfer has already been on the queue before, we
1760 	 * need to reinsert it in the correct order.  That order is:
1761 	 *
1762 	 *	Immediately before the first xfer for this periph
1763 	 *	with a requeuecnt less than xs->xs_requeuecnt.
1764 	 *
1765 	 * Failing that, at the end of the queue.  (We'll end up
1766 	 * there naturally.)
1767 	 */
1768 	if (xs->xs_requeuecnt != 0) {
1769 		for (qxs = TAILQ_FIRST(&chan->chan_queue); qxs != NULL;
1770 		     qxs = TAILQ_NEXT(qxs, channel_q)) {
1771 			if (qxs->xs_periph == xs->xs_periph &&
1772 			    qxs->xs_requeuecnt < xs->xs_requeuecnt)
1773 				break;
1774 		}
1775 		if (qxs != NULL) {
1776 			TAILQ_INSERT_AFTER(&chan->chan_queue, qxs, xs,
1777 			    channel_q);
1778 			goto out;
1779 		}
1780 	}
1781 	TAILQ_INSERT_TAIL(&chan->chan_queue, xs, channel_q);
1782  out:
1783 	if (xs->xs_control & XS_CTL_THAW_PERIPH)
1784 		scsipi_periph_thaw(xs->xs_periph, 1);
1785 	splx(s);
1786 	return (0);
1787 }
1788 
1789 /*
1790  * scsipi_run_queue:
1791  *
1792  *	Start as many xfers as possible running on the channel.
1793  */
1794 static void
1795 scsipi_run_queue(struct scsipi_channel *chan)
1796 {
1797 	struct scsipi_xfer *xs;
1798 	struct scsipi_periph *periph;
1799 	int s;
1800 
1801 	for (;;) {
1802 		s = splbio();
1803 
1804 		/*
1805 		 * If the channel is frozen, we can't do any work right
1806 		 * now.
1807 		 */
1808 		if (chan->chan_qfreeze != 0) {
1809 			splx(s);
1810 			return;
1811 		}
1812 
1813 		/*
1814 		 * Look for work to do, and make sure we can do it.
1815 		 */
1816 		for (xs = TAILQ_FIRST(&chan->chan_queue); xs != NULL;
1817 		     xs = TAILQ_NEXT(xs, channel_q)) {
1818 			periph = xs->xs_periph;
1819 
1820 			if ((periph->periph_sent >= periph->periph_openings) ||
1821 			    periph->periph_qfreeze != 0 ||
1822 			    (periph->periph_flags & PERIPH_UNTAG) != 0)
1823 				continue;
1824 
1825 			if ((periph->periph_flags &
1826 			    (PERIPH_RECOVERING | PERIPH_SENSE)) != 0 &&
1827 			    (xs->xs_control & XS_CTL_URGENT) == 0)
1828 				continue;
1829 
1830 			/*
1831 			 * We can issue this xfer!
1832 			 */
1833 			goto got_one;
1834 		}
1835 
1836 		/*
1837 		 * Can't find any work to do right now.
1838 		 */
1839 		splx(s);
1840 		return;
1841 
1842  got_one:
1843 		/*
1844 		 * Have an xfer to run.  Allocate a resource from
1845 		 * the adapter to run it.  If we can't allocate that
1846 		 * resource, we don't dequeue the xfer.
1847 		 */
1848 		if (scsipi_get_resource(chan) == 0) {
1849 			/*
1850 			 * Adapter is out of resources.  If the adapter
1851 			 * supports it, attempt to grow them.
1852 			 */
1853 			if (scsipi_grow_resources(chan) == 0) {
1854 				/*
1855 				 * Wasn't able to grow resources,
1856 				 * nothing more we can do.
1857 				 */
1858 				if (xs->xs_control & XS_CTL_POLL) {
1859 					scsipi_printaddr(xs->xs_periph);
1860 					printf("polling command but no "
1861 					    "adapter resources");
1862 					/* We'll panic shortly... */
1863 				}
1864 				splx(s);
1865 
1866 				/*
1867 				 * XXX: We should be able to note that
1868 				 * XXX: resources are needed here!
1869 				 */
1870 				return;
1871 			}
1872 			/*
1873 			 * scsipi_grow_resources() allocated the resource
1874 			 * for us.
1875 			 */
1876 		}
1877 
1878 		/*
1879 		 * We have a resource to run this xfer, do it!
1880 		 */
1881 		TAILQ_REMOVE(&chan->chan_queue, xs, channel_q);
1882 
1883 		/*
1884 		 * If the command is to be tagged, allocate a tag ID
1885 		 * for it.
1886 		 */
1887 		if (XS_CTL_TAGTYPE(xs) != 0)
1888 			scsipi_get_tag(xs);
1889 		else
1890 			periph->periph_flags |= PERIPH_UNTAG;
1891 		periph->periph_sent++;
1892 		splx(s);
1893 
1894 		scsipi_adapter_request(chan, ADAPTER_REQ_RUN_XFER, xs);
1895 	}
1896 #ifdef DIAGNOSTIC
1897 	panic("scsipi_run_queue: impossible");
1898 #endif
1899 }
1900 
1901 /*
1902  * scsipi_execute_xs:
1903  *
1904  *	Begin execution of an xfer, waiting for it to complete, if necessary.
1905  */
1906 int
1907 scsipi_execute_xs(struct scsipi_xfer *xs)
1908 {
1909 	struct scsipi_periph *periph = xs->xs_periph;
1910 	struct scsipi_channel *chan = periph->periph_channel;
1911 	int oasync, async, poll, error, s;
1912 
1913 	KASSERT(!cold);
1914 
1915 	(chan->chan_bustype->bustype_cmd)(xs);
1916 
1917 	if (xs->xs_control & XS_CTL_DATA_ONSTACK) {
1918 #if 1
1919 		if (xs->xs_control & XS_CTL_ASYNC)
1920 			panic("scsipi_execute_xs: on stack and async");
1921 #endif
1922 		/*
1923 		 * If the I/O buffer is allocated on stack, the
1924 		 * process must NOT be swapped out, as the device will
1925 		 * be accessing the stack.
1926 		 */
1927 		PHOLD(curlwp);
1928 	}
1929 
1930 	xs->xs_status &= ~XS_STS_DONE;
1931 	xs->error = XS_NOERROR;
1932 	xs->resid = xs->datalen;
1933 	xs->status = SCSI_OK;
1934 
1935 #ifdef SCSIPI_DEBUG
1936 	if (xs->xs_periph->periph_dbflags & SCSIPI_DB3) {
1937 		printf("scsipi_execute_xs: ");
1938 		show_scsipi_xs(xs);
1939 		printf("\n");
1940 	}
1941 #endif
1942 
1943 	/*
1944 	 * Deal with command tagging:
1945 	 *
1946 	 *	- If the device's current operating mode doesn't
1947 	 *	  include tagged queueing, clear the tag mask.
1948 	 *
1949 	 *	- If the device's current operating mode *does*
1950 	 *	  include tagged queueing, set the tag_type in
1951 	 *	  the xfer to the appropriate byte for the tag
1952 	 *	  message.
1953 	 */
1954 	if ((PERIPH_XFER_MODE(periph) & PERIPH_CAP_TQING) == 0 ||
1955 		(xs->xs_control & XS_CTL_REQSENSE)) {
1956 		xs->xs_control &= ~XS_CTL_TAGMASK;
1957 		xs->xs_tag_type = 0;
1958 	} else {
1959 		/*
1960 		 * If the request doesn't specify a tag, give Head
1961 		 * tags to URGENT operations and Ordered tags to
1962 		 * everything else.
1963 		 */
1964 		if (XS_CTL_TAGTYPE(xs) == 0) {
1965 			if (xs->xs_control & XS_CTL_URGENT)
1966 				xs->xs_control |= XS_CTL_HEAD_TAG;
1967 			else
1968 				xs->xs_control |= XS_CTL_ORDERED_TAG;
1969 		}
1970 
1971 		switch (XS_CTL_TAGTYPE(xs)) {
1972 		case XS_CTL_ORDERED_TAG:
1973 			xs->xs_tag_type = MSG_ORDERED_Q_TAG;
1974 			break;
1975 
1976 		case XS_CTL_SIMPLE_TAG:
1977 			xs->xs_tag_type = MSG_SIMPLE_Q_TAG;
1978 			break;
1979 
1980 		case XS_CTL_HEAD_TAG:
1981 			xs->xs_tag_type = MSG_HEAD_OF_Q_TAG;
1982 			break;
1983 
1984 		default:
1985 			scsipi_printaddr(periph);
1986 			printf("invalid tag mask 0x%08x\n",
1987 			    XS_CTL_TAGTYPE(xs));
1988 			panic("scsipi_execute_xs");
1989 		}
1990 	}
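
	/*
	 * Illustrative sketch (not part of the driver): since the
	 * defaulting above hands Ordered tags to everything that is
	 * not URGENT, a caller that can tolerate reordering picks a
	 * Simple tag itself before submitting the xfer:
	 *
	 *	xs->xs_control |= XS_CTL_SIMPLE_TAG;
	 */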
1991 
1992 	/* If the adapter wants us to poll, poll. */
1993 	if (chan->chan_adapter->adapt_flags & SCSIPI_ADAPT_POLL_ONLY)
1994 		xs->xs_control |= XS_CTL_POLL;
1995 
1996 	/*
1997 	 * If we don't yet have a completion thread, or we are to poll for
1998 	 * completion, clear the ASYNC flag.
1999 	 */
2000 	oasync = (xs->xs_control & XS_CTL_ASYNC);
2001 	if (chan->chan_thread == NULL || (xs->xs_control & XS_CTL_POLL) != 0)
2002 		xs->xs_control &= ~XS_CTL_ASYNC;
2003 
2004 	async = (xs->xs_control & XS_CTL_ASYNC);
2005 	poll = (xs->xs_control & XS_CTL_POLL);
2006 
2007 #ifdef DIAGNOSTIC
2008 	if (oasync != 0 && xs->bp == NULL)
2009 		panic("scsipi_execute_xs: XS_CTL_ASYNC but no buf");
2010 #endif
2011 
2012 	/*
2013 	 * Enqueue the transfer.  If we're not polling for completion, this
2014 	 * should ALWAYS return `no error'.
2015 	 */
2016 	error = scsipi_enqueue(xs);
2017 	if (error) {
2018 		if (poll == 0) {
2019 			scsipi_printaddr(periph);
2020 			printf("not polling, but enqueue failed with %d\n",
2021 			    error);
2022 			panic("scsipi_execute_xs");
2023 		}
2024 
2025 		scsipi_printaddr(periph);
2026 		printf("should have flushed queue?\n");
2027 		goto free_xs;
2028 	}
2029 
2030  restarted:
2031 	scsipi_run_queue(chan);
2032 
2033 	/*
2034 	 * The xfer is enqueued, and possibly running.  If it's to be
2035 	 * completed asynchronously, just return now.
2036 	 */
2037 	if (async)
2038 		return (0);
2039 
2040 	/*
2041 	 * Not an asynchronous command; wait for it to complete.
2042 	 */
2043 	s = splbio();
2044 	while ((xs->xs_status & XS_STS_DONE) == 0) {
2045 		if (poll) {
2046 			scsipi_printaddr(periph);
2047 			printf("polling command not done\n");
2048 			panic("scsipi_execute_xs");
2049 		}
2050 		(void) tsleep(xs, PRIBIO, "xscmd", 0);
2051 	}
2052 	splx(s);
2053 
2054 	/*
2055 	 * Command is complete.  scsipi_done() has awakened us to perform
2056 	 * the error handling.
2057 	 */
2058 	error = scsipi_complete(xs);
2059 	if (error == ERESTART)
2060 		goto restarted;
2061 
2062 	/*
2063 	 * If it was meant to run async and we cleared async ourselves,
2064 	 * don't return an error here; it has already been handled.
2065 	 */
2066 	if (oasync)
2067 		error = 0;
2068 	/*
2069 	 * Command completed successfully or fatal error occurred.  Fall
2070 	 * into....
2071 	 */
2072  free_xs:
2073 	if (xs->xs_control & XS_CTL_DATA_ONSTACK)
2074 		PRELE(curlwp);
2075 
2076 	s = splbio();
2077 	scsipi_put_xs(xs);
2078 	splx(s);
2079 
2080 	/*
2081 	 * Kick the queue, keep it running in case it stopped for some
2082 	 * reason.
2083 	 */
2084 	scsipi_run_queue(chan);
2085 
2086 	return (error);
2087 }
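
/*
 * Illustrative example (a sketch, not lifted from a real driver):
 * periph drivers normally reach scsipi_execute_xs() indirectly via
 * scsipi_command(), which allocates and fills in the xfer.  A
 * hypothetical synchronous TEST UNIT READY might look roughly like:
 *
 *	struct scsi_test_unit_ready cmd;
 *
 *	memset(&cmd, 0, sizeof(cmd));
 *	cmd.opcode = SCSI_TEST_UNIT_READY;
 *	error = scsipi_command(periph, (void *)&cmd, sizeof(cmd),
 *	    NULL, 0, SCSIPIRETRIES, 10000, NULL, 0);
 */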
2088 
2089 /*
2090  * scsipi_completion_thread:
2091  *
2092  *	This is the completion thread.  We wait for errors on
2093  *	asynchronous xfers, and perform the error handling
2094  *	function, restarting the command, if necessary.
2095  */
2096 static void
2097 scsipi_completion_thread(void *arg)
2098 {
2099 	struct scsipi_channel *chan = arg;
2100 	struct scsipi_xfer *xs;
2101 	int s;
2102 
2103 	if (chan->chan_init_cb)
2104 		(*chan->chan_init_cb)(chan, chan->chan_init_cb_arg);
2105 
2106 	s = splbio();
2107 	chan->chan_flags |= SCSIPI_CHAN_TACTIVE;
2108 	splx(s);
2109 	for (;;) {
2110 		s = splbio();
2111 		xs = TAILQ_FIRST(&chan->chan_complete);
2112 		if (xs == NULL && chan->chan_tflags == 0) {
2113 			/* nothing to do; wait */
2114 			(void) tsleep(&chan->chan_complete, PRIBIO,
2115 			    "sccomp", 0);
2116 			splx(s);
2117 			continue;
2118 		}
2119 		if (chan->chan_tflags & SCSIPI_CHANT_CALLBACK) {
2120 			/* call chan_callback from thread context */
2121 			chan->chan_tflags &= ~SCSIPI_CHANT_CALLBACK;
2122 			chan->chan_callback(chan, chan->chan_callback_arg);
2123 			splx(s);
2124 			continue;
2125 		}
2126 		if (chan->chan_tflags & SCSIPI_CHANT_GROWRES) {
2127 			/* attempt to get more openings for this channel */
2128 			chan->chan_tflags &= ~SCSIPI_CHANT_GROWRES;
2129 			scsipi_adapter_request(chan,
2130 			    ADAPTER_REQ_GROW_RESOURCES, NULL);
2131 			scsipi_channel_thaw(chan, 1);
2132 			splx(s);
2133 			if (chan->chan_tflags & SCSIPI_CHANT_GROWRES) {
2134 				preempt(1);
2135 			}
2136 			continue;
2137 		}
2138 		if (chan->chan_tflags & SCSIPI_CHANT_KICK) {
2139 			/* explicitly run the queues for this channel */
2140 			chan->chan_tflags &= ~SCSIPI_CHANT_KICK;
2141 			scsipi_run_queue(chan);
2142 			splx(s);
2143 			continue;
2144 		}
2145 		if (chan->chan_tflags & SCSIPI_CHANT_SHUTDOWN) {
2146 			splx(s);
2147 			break;
2148 		}
2149 		if (xs) {
2150 			TAILQ_REMOVE(&chan->chan_complete, xs, channel_q);
2151 			splx(s);
2152 
2153 			/*
2154 			 * Have an xfer with an error; process it.
2155 			 */
2156 			(void) scsipi_complete(xs);
2157 
2158 			/*
2159 			 * Kick the queue; keep it running if it was stopped
2160 			 * for some reason.
2161 			 */
2162 			scsipi_run_queue(chan);
2163 		} else {
2164 			splx(s);
2165 		}
2166 	}
2167 
2168 	chan->chan_thread = NULL;
2169 
2170 	/* In case parent is waiting for us to exit. */
2171 	wakeup(&chan->chan_thread);
2172 
2173 	kthread_exit(0);
2174 }
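
/*
 * For orientation (the real hand-off lives in scsipi_done()): an
 * errored asynchronous xfer reaches the thread above roughly via
 *
 *	TAILQ_INSERT_TAIL(&chan->chan_complete, xs, channel_q);
 *	wakeup(&chan->chan_complete);
 *
 * which is the same pattern the reset handler below uses for
 * REQUEST_SENSE xfers.
 */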
2175 
2176 /*
2177  * scsipi_create_completion_thread:
2178  *
2179  *	Callback to actually create the completion thread.
2180  */
2181 void
2182 scsipi_create_completion_thread(void *arg)
2183 {
2184 	struct scsipi_channel *chan = arg;
2185 	struct scsipi_adapter *adapt = chan->chan_adapter;
2186 
2187 	if (kthread_create1(scsipi_completion_thread, chan,
2188 	    &chan->chan_thread, "%s", chan->chan_name)) {
2189 		printf("%s: unable to create completion thread for "
2190 		    "channel %d\n", adapt->adapt_dev->dv_xname,
2191 		    chan->chan_channel);
2192 		panic("scsipi_create_completion_thread");
2193 	}
2194 }
2195 
2196 /*
2197  * scsipi_thread_call_callback:
2198  *
2199  *	Request to call a callback from the completion thread.
2200  */
2201 int
2202 scsipi_thread_call_callback(struct scsipi_channel *chan,
2203     void (*callback)(struct scsipi_channel *, void *), void *arg)
2204 {
2205 	int s;
2206 
2207 	s = splbio();
2208 	if ((chan->chan_flags & SCSIPI_CHAN_TACTIVE) == 0) {
2209 		/* kernel thread doesn't exist yet */
2210 		splx(s);
2211 		return ESRCH;
2212 	}
2213 	if (chan->chan_tflags & SCSIPI_CHANT_CALLBACK) {
2214 		splx(s);
2215 		return EBUSY;
2216 	}
2217 	scsipi_channel_freeze(chan, 1);
2218 	chan->chan_callback = callback;
2219 	chan->chan_callback_arg = arg;
2220 	chan->chan_tflags |= SCSIPI_CHANT_CALLBACK;
2221 	wakeup(&chan->chan_complete);
2222 	splx(s);
2223 	return (0);
2224 }
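
/*
 * Usage sketch (hypothetical caller and names): an adapter needing to
 * run reconfiguration from thread context defers it like this,
 * retrying later on ESRCH/EBUSY.  Note the channel is frozen by one
 * level here and the completion thread does not thaw it, so the
 * callback itself is expected to call scsipi_channel_thaw().
 *
 *	if (scsipi_thread_call_callback(chan, mydrv_reconfig_cb, sc))
 *		callout_schedule(&sc->sc_retry_ch, hz);
 */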
2225 
2226 /*
2227  * scsipi_async_event:
2228  *
2229  *	Handle an asynchronous event from an adapter.
2230  */
2231 void
2232 scsipi_async_event(struct scsipi_channel *chan, scsipi_async_event_t event,
2233     void *arg)
2234 {
2235 	int s;
2236 
2237 	s = splbio();
2238 	switch (event) {
2239 	case ASYNC_EVENT_MAX_OPENINGS:
2240 		scsipi_async_event_max_openings(chan,
2241 		    (struct scsipi_max_openings *)arg);
2242 		break;
2243 
2244 	case ASYNC_EVENT_XFER_MODE:
2245 		scsipi_async_event_xfer_mode(chan,
2246 		    (struct scsipi_xfer_mode *)arg);
2247 		break;
2248 	case ASYNC_EVENT_RESET:
2249 		scsipi_async_event_channel_reset(chan);
2250 		break;
2251 	}
2252 	splx(s);
2253 }
2254 
2255 /*
2256  * scsipi_print_xfer_mode:
2257  *
2258  *	Print a periph's current transfer mode.
2259  */
2260 void
2261 scsipi_print_xfer_mode(struct scsipi_periph *periph)
2262 {
2263 	int period, freq, speed, mbs;
2264 
2265 	if ((periph->periph_flags & PERIPH_MODE_VALID) == 0)
2266 		return;
2267 
2268 	aprint_normal("%s: ", periph->periph_dev->dv_xname);
2269 	if (periph->periph_mode & (PERIPH_CAP_SYNC | PERIPH_CAP_DT)) {
2270 		period = scsipi_sync_factor_to_period(periph->periph_period);
2271 		aprint_normal("sync (%d.%02dns offset %d)",
2272 		    period / 100, period % 100, periph->periph_offset);
2273 	} else
2274 		aprint_normal("async");
2275 
2276 	if (periph->periph_mode & PERIPH_CAP_WIDE32)
2277 		aprint_normal(", 32-bit");
2278 	else if (periph->periph_mode & (PERIPH_CAP_WIDE16 | PERIPH_CAP_DT))
2279 		aprint_normal(", 16-bit");
2280 	else
2281 		aprint_normal(", 8-bit");
2282 
2283 	if (periph->periph_mode & (PERIPH_CAP_SYNC | PERIPH_CAP_DT)) {
2284 		freq = scsipi_sync_factor_to_freq(periph->periph_period);
2285 		speed = freq;
2286 		if (periph->periph_mode & PERIPH_CAP_WIDE32)
2287 			speed *= 4;
2288 		else if (periph->periph_mode &
2289 		    (PERIPH_CAP_WIDE16 | PERIPH_CAP_DT))
2290 			speed *= 2;
2291 		mbs = speed / 1000;
2292 		if (mbs > 0)
2293 			aprint_normal(" (%d.%03dMB/s)", mbs, speed % 1000);
2294 		else
2295 			aprint_normal(" (%dKB/s)", speed % 1000);
2296 	}
2297 
2298 	aprint_normal(" transfers");
2299 
2300 	if (periph->periph_mode & PERIPH_CAP_TQING)
2301 		aprint_normal(", tagged queueing");
2302 
2303 	aprint_normal("\n");
2304 }
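
/*
 * Worked example of the arithmetic above: a periph negotiated to
 * factor 0x0a (period 2500, i.e. 25.00ns) with PERIPH_CAP_WIDE16 set
 * yields freq = 100000000 / 2500 = 40000, speed = 40000 * 2 = 80000,
 * and prints "sync (25.00ns offset N), 16-bit (80.000MB/s) transfers".
 */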
2305 
2306 /*
2307  * scsipi_async_event_max_openings:
2308  *
2309  *	Update the maximum number of outstanding commands a
2310  *	device may have.
2311  */
2312 static void
2313 scsipi_async_event_max_openings(struct scsipi_channel *chan,
2314     struct scsipi_max_openings *mo)
2315 {
2316 	struct scsipi_periph *periph;
2317 	int minlun, maxlun;
2318 
2319 	if (mo->mo_lun == -1) {
2320 		/*
2321 		 * Wildcarded; apply it to all LUNs.
2322 		 */
2323 		minlun = 0;
2324 		maxlun = chan->chan_nluns - 1;
2325 	} else
2326 		minlun = maxlun = mo->mo_lun;
2327 
2328 	/* XXX This could really suck with a large LUN space. */
2329 	for (; minlun <= maxlun; minlun++) {
2330 		periph = scsipi_lookup_periph(chan, mo->mo_target, minlun);
2331 		if (periph == NULL)
2332 			continue;
2333 
2334 		if (mo->mo_openings < periph->periph_openings)
2335 			periph->periph_openings = mo->mo_openings;
2336 		else if (mo->mo_openings > periph->periph_openings &&
2337 		    (periph->periph_flags & PERIPH_GROW_OPENINGS) != 0)
2338 			periph->periph_openings = mo->mo_openings;
2339 	}
2340 }
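
/*
 * Usage sketch (illustrative): an adapter that runs short of internal
 * command slots for a device can throttle it through the async event
 * path, here capping target 2, all LUNs, at 4 openings:
 *
 *	struct scsipi_max_openings mo;
 *
 *	mo.mo_target = 2;
 *	mo.mo_lun = -1;
 *	mo.mo_openings = 4;
 *	scsipi_async_event(chan, ASYNC_EVENT_MAX_OPENINGS, &mo);
 */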
2341 
2342 /*
2343  * scsipi_async_event_xfer_mode:
2344  *
2345  *	Update the xfer mode for all periphs sharing the
2346  *	specified I_T Nexus.
2347  */
2348 static void
2349 scsipi_async_event_xfer_mode(struct scsipi_channel *chan,
2350     struct scsipi_xfer_mode *xm)
2351 {
2352 	struct scsipi_periph *periph;
2353 	int lun, announce, mode, period, offset;
2354 
2355 	for (lun = 0; lun < chan->chan_nluns; lun++) {
2356 		periph = scsipi_lookup_periph(chan, xm->xm_target, lun);
2357 		if (periph == NULL)
2358 			continue;
2359 		announce = 0;
2360 
2361 		/*
2362 		 * Clamp the xfer mode down to this periph's capabilities.
2363 		 */
2364 		mode = xm->xm_mode & periph->periph_cap;
2365 		if (mode & PERIPH_CAP_SYNC) {
2366 			period = xm->xm_period;
2367 			offset = xm->xm_offset;
2368 		} else {
2369 			period = 0;
2370 			offset = 0;
2371 		}
2372 
2373 		/*
2374 		 * If we do not have a valid xfer mode yet, or the parameters
2375 		 * are different, announce them.
2376 		 */
2377 		if ((periph->periph_flags & PERIPH_MODE_VALID) == 0 ||
2378 		    periph->periph_mode != mode ||
2379 		    periph->periph_period != period ||
2380 		    periph->periph_offset != offset)
2381 			announce = 1;
2382 
2383 		periph->periph_mode = mode;
2384 		periph->periph_period = period;
2385 		periph->periph_offset = offset;
2386 		periph->periph_flags |= PERIPH_MODE_VALID;
2387 
2388 		if (announce)
2389 			scsipi_print_xfer_mode(periph);
2390 	}
2391 }
2392 
2393 /*
2394  * scsipi_set_xfer_mode:
2395  *
2396  *	Set the xfer mode for the specified I_T Nexus.
2397  */
2398 void
2399 scsipi_set_xfer_mode(struct scsipi_channel *chan, int target, int immed)
2400 {
2401 	struct scsipi_xfer_mode xm;
2402 	struct scsipi_periph *itperiph;
2403 	int lun, s;
2404 
2405 	/*
2406 	 * Start with the minimal xfer mode; it is upgraded below.
2407 	 */
2408 	xm.xm_target = target;
2409 	xm.xm_mode = 0;
2410 	xm.xm_period = 0;			/* ignored */
2411 	xm.xm_offset = 0;			/* ignored */
2412 
2413 	/*
2414 	 * Find the first LUN we know about on this I_T Nexus.
2415 	 */
2416 	for (itperiph = NULL, lun = 0; lun < chan->chan_nluns; lun++) {
2417 		itperiph = scsipi_lookup_periph(chan, target, lun);
2418 		if (itperiph != NULL)
2419 			break;
2420 	}
2421 	if (itperiph != NULL) {
2422 		xm.xm_mode = itperiph->periph_cap;
2423 		/*
2424 		 * Now issue the request to the adapter.
2425 		 */
2426 		s = splbio();
2427 		scsipi_adapter_request(chan, ADAPTER_REQ_SET_XFER_MODE, &xm);
2428 		splx(s);
2429 		/*
2430 		 * If we want this to happen immediately, issue a dummy
2431 		 * command, since most adapters can't really negotiate unless
2432 		 * they're executing a job.
2433 		 */
2434 		if (immed != 0) {
2435 			(void) scsipi_test_unit_ready(itperiph,
2436 			    XS_CTL_DISCOVERY | XS_CTL_IGNORE_ILLEGAL_REQUEST |
2437 			    XS_CTL_IGNORE_NOT_READY |
2438 			    XS_CTL_IGNORE_MEDIA_CHANGE);
2439 		}
2440 	}
2441 }
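
/*
 * Usage sketch (illustrative): renegotiate a target right away, e.g.
 * after an error that casts doubt on the current agreement; a nonzero
 * third argument triggers the dummy TEST UNIT READY above so the
 * negotiation actually happens:
 *
 *	scsipi_set_xfer_mode(chan, target, 1);
 */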
2442 
2443 /*
2444  * scsipi_async_event_channel_reset:
2445  *
2446  *	Handle a SCSI bus reset.
2447  *	Called at splbio().
2448  */
2449 static void
2450 scsipi_async_event_channel_reset(struct scsipi_channel *chan)
2451 {
2452 	struct scsipi_xfer *xs, *xs_next;
2453 	struct scsipi_periph *periph;
2454 	int target, lun;
2455 
2456 	/*
2457 	 * The channel has been reset.  Also mark pending REQUEST_SENSE
2458 	 * commands as reset, since their sense data is no longer available.
2459 	 * We can't call scsipi_done() from here, as the commands have not
2460 	 * been sent to the adapter yet (that would corrupt the accounting).
2461 	 */
2462 
2463 	for (xs = TAILQ_FIRST(&chan->chan_queue); xs != NULL; xs = xs_next) {
2464 		xs_next = TAILQ_NEXT(xs, channel_q);
2465 		if (xs->xs_control & XS_CTL_REQSENSE) {
2466 			TAILQ_REMOVE(&chan->chan_queue, xs, channel_q);
2467 			xs->error = XS_RESET;
2468 			if ((xs->xs_control & XS_CTL_ASYNC) != 0)
2469 				TAILQ_INSERT_TAIL(&chan->chan_complete, xs,
2470 				    channel_q);
2471 		}
2472 	}
2473 	wakeup(&chan->chan_complete);
2474 	/* Catch xfers with pending sense that may not have a REQSENSE xs yet. */
2475 	for (target = 0; target < chan->chan_ntargets; target++) {
2476 		if (target == chan->chan_id)
2477 			continue;
2478 		for (lun = 0; lun <  chan->chan_nluns; lun++) {
2479 			periph = scsipi_lookup_periph(chan, target, lun);
2480 			if (periph) {
2481 				xs = periph->periph_xscheck;
2482 				if (xs)
2483 					xs->error = XS_RESET;
2484 			}
2485 		}
2486 	}
2487 }
2488 
2489 /*
2490  * scsipi_target_detach:
2491  *
2492  *	Detach all periphs associated with an I_T Nexus.
2493  *	Must be called from a valid thread context.
2494  */
2495 int
2496 scsipi_target_detach(struct scsipi_channel *chan, int target, int lun,
2497     int flags)
2498 {
2499 	struct scsipi_periph *periph;
2500 	int ctarget, mintarget, maxtarget;
2501 	int clun, minlun, maxlun;
2502 	int error;
2503 
2504 	if (target == -1) {
2505 		mintarget = 0;
2506 		maxtarget = chan->chan_ntargets;
2507 	} else {
2508 		if (target == chan->chan_id)
2509 			return EINVAL;
2510 		if (target < 0 || target >= chan->chan_ntargets)
2511 			return EINVAL;
2512 		mintarget = target;
2513 		maxtarget = target + 1;
2514 	}
2515 
2516 	if (lun == -1) {
2517 		minlun = 0;
2518 		maxlun = chan->chan_nluns;
2519 	} else {
2520 		if (lun < 0 || lun >= chan->chan_nluns)
2521 			return EINVAL;
2522 		minlun = lun;
2523 		maxlun = lun + 1;
2524 	}
2525 
2526 	for (ctarget = mintarget; ctarget < maxtarget; ctarget++) {
2527 		if (ctarget == chan->chan_id)
2528 			continue;
2529 
2530 		for (clun = minlun; clun < maxlun; clun++) {
2531 			periph = scsipi_lookup_periph(chan, ctarget, clun);
2532 			if (periph == NULL)
2533 				continue;
2534 			error = config_detach(periph->periph_dev, flags);
2535 			if (error)
2536 				return (error);
2537 		}
2538 	}
2539 	return (0);
2540 }
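
/*
 * Usage sketch (illustrative): detach every LUN of target 3, or every
 * periph on the channel using wildcards, forcing the detach:
 *
 *	error = scsipi_target_detach(chan, 3, -1, DETACH_FORCE);
 *	error = scsipi_target_detach(chan, -1, -1, DETACH_FORCE);
 */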
2541 
2542 /*
2543  * scsipi_adapter_addref:
2544  *
2545  *	Add a reference to the adapter pointed to by the provided
2546  *	link, enabling the adapter if necessary.
2547  */
2548 int
2549 scsipi_adapter_addref(struct scsipi_adapter *adapt)
2550 {
2551 	int s, error = 0;
2552 
2553 	s = splbio();
2554 	if (adapt->adapt_refcnt++ == 0 && adapt->adapt_enable != NULL) {
2555 		error = (*adapt->adapt_enable)(adapt->adapt_dev, 1);
2556 		if (error)
2557 			adapt->adapt_refcnt--;
2558 	}
2559 	splx(s);
2560 	return (error);
2561 }
2562 
2563 /*
2564  * scsipi_adapter_delref:
2565  *
2566  *	Delete a reference to the adapter pointed to by the provided
2567  *	link, disabling the adapter if possible.
2568  */
2569 void
2570 scsipi_adapter_delref(struct scsipi_adapter *adapt)
2571 {
2572 	int s;
2573 
2574 	s = splbio();
2575 	if (adapt->adapt_refcnt-- == 1 && adapt->adapt_enable != NULL)
2576 		(void) (*adapt->adapt_enable)(adapt->adapt_dev, 0);
2577 	splx(s);
2578 }
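
/*
 * Usage sketch (illustrative): periph open/close paths typically
 * bracket device use with these calls, so an adapter that supports
 * adapt_enable is only powered while somebody actually needs it:
 *
 *	if ((error = scsipi_adapter_addref(adapt)) != 0)
 *		return (error);
 *	...do I/O...
 *	scsipi_adapter_delref(adapt);
 */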
2579 
2580 static struct scsipi_syncparam {
2581 	int	ss_factor;
2582 	int	ss_period;	/* ns * 100 */
2583 } scsipi_syncparams[] = {
2584 	{ 0x08,		 625 },	/* FAST-160 (Ultra320) */
2585 	{ 0x09,		1250 },	/* FAST-80 (Ultra160) */
2586 	{ 0x0a,		2500 },	/* FAST-40 40MHz (Ultra2) */
2587 	{ 0x0b,		3030 },	/* FAST-40 33MHz (Ultra2) */
2588 	{ 0x0c,		5000 },	/* FAST-20 (Ultra) */
2589 };
2590 static const int scsipi_nsyncparams =
2591     sizeof(scsipi_syncparams) / sizeof(scsipi_syncparams[0]);
2592 
2593 int
2594 scsipi_sync_period_to_factor(int period /* ns * 100 */)
2595 {
2596 	int i;
2597 
2598 	for (i = 0; i < scsipi_nsyncparams; i++) {
2599 		if (period <= scsipi_syncparams[i].ss_period)
2600 			return (scsipi_syncparams[i].ss_factor);
2601 	}
2602 
2603 	return ((period / 100) / 4);
2604 }
2605 
2606 int
2607 scsipi_sync_factor_to_period(int factor)
2608 {
2609 	int i;
2610 
2611 	for (i = 0; i < scsipi_nsyncparams; i++) {
2612 		if (factor == scsipi_syncparams[i].ss_factor)
2613 			return (scsipi_syncparams[i].ss_period);
2614 	}
2615 
2616 	return ((factor * 4) * 100);
2617 }
2618 
2619 int
2620 scsipi_sync_factor_to_freq(int factor)
2621 {
2622 	int i;
2623 
2624 	for (i = 0; i < scsipi_nsyncparams; i++) {
2625 		if (factor == scsipi_syncparams[i].ss_factor)
2626 			return (100000000 / scsipi_syncparams[i].ss_period);
2627 	}
2628 
2629 	return (10000000 / ((factor * 4) * 10));
2630 }
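
/*
 * Worked example tying the three conversions together: table entry
 * factor 0x09 (FAST-80) has period 1250 (12.50ns) and frequency
 * 100000000 / 1250 = 80000 kHz.  A factor outside the table, say
 * 0x32, falls through to the linear rules: period (0x32 * 4) * 100 =
 * 20000 (200ns) and frequency 10000000 / (0x32 * 4 * 10) = 5000 kHz.
 */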
2631 
2632 #ifdef SCSIPI_DEBUG
2633 /*
2634  * Given a scsipi_xfer, dump the request in all its glory.
2635  */
2636 void
2637 show_scsipi_xs(struct scsipi_xfer *xs)
2638 {
2639 
2640 	printf("xs(%p): ", xs);
2641 	printf("xs_control(0x%08x)", xs->xs_control);
2642 	printf("xs_status(0x%08x)", xs->xs_status);
2643 	printf("periph(%p)", xs->xs_periph);
2644 	printf("retr(0x%x)", xs->xs_retries);
2645 	printf("timo(0x%x)", xs->timeout);
2646 	printf("cmd(%p)", xs->cmd);
2647 	printf("len(0x%x)", xs->cmdlen);
2648 	printf("data(%p)", xs->data);
2649 	printf("len(0x%x)", xs->datalen);
2650 	printf("res(0x%x)", xs->resid);
2651 	printf("err(0x%x)", xs->error);
2652 	printf("bp(%p)", xs->bp);
2653 	show_scsipi_cmd(xs);
2654 }
2655 
2656 void
2657 show_scsipi_cmd(struct scsipi_xfer *xs)
2658 {
2659 	u_char *b = (u_char *) xs->cmd;
2660 	int i = 0;
2661 
2662 	scsipi_printaddr(xs->xs_periph);
2663 	printf(" command: ");
2664 
2665 	if ((xs->xs_control & XS_CTL_RESET) == 0) {
2666 		while (i < xs->cmdlen) {
2667 			if (i)
2668 				printf(",");
2669 			printf("0x%x", b[i++]);
2670 		}
2671 		printf("-[%d bytes]\n", xs->datalen);
2672 		if (xs->datalen)
2673 			show_mem(xs->data, min(64, xs->datalen));
2674 	} else
2675 		printf("-RESET-\n");
2676 }
2677 
2678 void
2679 show_mem(u_char *address, int num)
2680 {
2681 	int x;
2682 
2683 	printf("------------------------------");
2684 	for (x = 0; x < num; x++) {
2685 		if ((x % 16) == 0)
2686 			printf("\n%03d: ", x);
2687 		printf("%02x ", *address++);
2688 	}
2689 	printf("\n------------------------------\n");
2690 }
2691 #endif /* SCSIPI_DEBUG */
2692