xref: /netbsd-src/sys/dev/scsipi/scsipi_base.c (revision df0caa2637da0538ecdf6b878c4d08e684b43d8f)
1 /*	$NetBSD: scsipi_base.c,v 1.131 2005/05/31 02:56:54 xtraeme Exp $	*/
2 
3 /*-
4  * Copyright (c) 1998, 1999, 2000, 2002, 2003, 2004 The NetBSD Foundation, Inc.
5  * All rights reserved.
6  *
7  * This code is derived from software contributed to The NetBSD Foundation
8  * by Charles M. Hannum; by Jason R. Thorpe of the Numerical Aerospace
9  * Simulation Facility, NASA Ames Research Center.
10  *
11  * Redistribution and use in source and binary forms, with or without
12  * modification, are permitted provided that the following conditions
13  * are met:
14  * 1. Redistributions of source code must retain the above copyright
15  *    notice, this list of conditions and the following disclaimer.
16  * 2. Redistributions in binary form must reproduce the above copyright
17  *    notice, this list of conditions and the following disclaimer in the
18  *    documentation and/or other materials provided with the distribution.
19  * 3. All advertising materials mentioning features or use of this software
20  *    must display the following acknowledgement:
21  *        This product includes software developed by the NetBSD
22  *        Foundation, Inc. and its contributors.
23  * 4. Neither the name of The NetBSD Foundation nor the names of its
24  *    contributors may be used to endorse or promote products derived
25  *    from this software without specific prior written permission.
26  *
27  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
28  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
29  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
30  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
31  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
32  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
33  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
34  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
35  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
36  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
37  * POSSIBILITY OF SUCH DAMAGE.
38  */
39 
40 #include <sys/cdefs.h>
41 __KERNEL_RCSID(0, "$NetBSD: scsipi_base.c,v 1.131 2005/05/31 02:56:54 xtraeme Exp $");
42 
43 #include "opt_scsi.h"
44 
45 #include <sys/param.h>
46 #include <sys/systm.h>
47 #include <sys/kernel.h>
48 #include <sys/buf.h>
49 #include <sys/uio.h>
50 #include <sys/malloc.h>
51 #include <sys/pool.h>
52 #include <sys/errno.h>
53 #include <sys/device.h>
54 #include <sys/proc.h>
55 #include <sys/kthread.h>
56 #include <sys/hash.h>
57 
58 #include <uvm/uvm_extern.h>
59 
60 #include <dev/scsipi/scsi_spc.h>
61 #include <dev/scsipi/scsipi_all.h>
62 #include <dev/scsipi/scsipi_disk.h>
63 #include <dev/scsipi/scsipiconf.h>
64 #include <dev/scsipi/scsipi_base.h>
65 
66 #include <dev/scsipi/scsi_all.h>
67 #include <dev/scsipi/scsi_message.h>
68 
69 static int	scsipi_complete(struct scsipi_xfer *);
70 static void	scsipi_request_sense(struct scsipi_xfer *);
71 static int	scsipi_enqueue(struct scsipi_xfer *);
72 static void	scsipi_run_queue(struct scsipi_channel *chan);
73 
74 static void	scsipi_completion_thread(void *);
75 
76 static void	scsipi_get_tag(struct scsipi_xfer *);
77 static void	scsipi_put_tag(struct scsipi_xfer *);
78 
79 static int	scsipi_get_resource(struct scsipi_channel *);
80 static void	scsipi_put_resource(struct scsipi_channel *);
81 
82 static void	scsipi_async_event_max_openings(struct scsipi_channel *,
83 		    struct scsipi_max_openings *);
84 static void	scsipi_async_event_xfer_mode(struct scsipi_channel *,
85 		    struct scsipi_xfer_mode *);
86 static void	scsipi_async_event_channel_reset(struct scsipi_channel *);
87 
88 static struct pool scsipi_xfer_pool;
89 
90 /*
91  * scsipi_init:
92  *
93  *	Called when a scsibus or atapibus is attached to the system
94  *	to initialize shared data structures.
95  */
96 void
97 scsipi_init(void)
98 {
99 	static int scsipi_init_done;
100 
101 	if (scsipi_init_done)
102 		return;
103 	scsipi_init_done = 1;
104 
105 	/* Initialize the scsipi_xfer pool. */
106 	pool_init(&scsipi_xfer_pool, sizeof(struct scsipi_xfer), 0,
107 	    0, 0, "scxspl", NULL);
108 	if (pool_prime(&scsipi_xfer_pool,
109 	    PAGE_SIZE / sizeof(struct scsipi_xfer)) == ENOMEM) {
110 		printf("WARNING: not enough memory for scsipi_xfer_pool\n");
111 	}
112 }
113 
114 /*
115  * scsipi_channel_init:
116  *
117  *	Initialize a scsipi_channel when it is attached.
118  */
119 int
120 scsipi_channel_init(struct scsipi_channel *chan)
121 {
122 	int i;
123 
124 	/* Initialize shared data. */
125 	scsipi_init();
126 
127 	/* Initialize the queues. */
128 	TAILQ_INIT(&chan->chan_queue);
129 	TAILQ_INIT(&chan->chan_complete);
130 
131 	for (i = 0; i < SCSIPI_CHAN_PERIPH_BUCKETS; i++)
132 		LIST_INIT(&chan->chan_periphtab[i]);
133 
134 	/*
135 	 * Create the asynchronous completion thread.
136 	 */
137 	kthread_create(scsipi_create_completion_thread, chan);
138 	return (0);
139 }
140 
141 /*
142  * scsipi_channel_shutdown:
143  *
144  *	Shutdown a scsipi_channel.
145  */
146 void
147 scsipi_channel_shutdown(struct scsipi_channel *chan)
148 {
149 
150 	/*
151 	 * Shut down the completion thread.
152 	 */
153 	chan->chan_tflags |= SCSIPI_CHANT_SHUTDOWN;
154 	wakeup(&chan->chan_complete);
155 
156 	/*
157 	 * Now wait for the thread to exit.
158 	 */
159 	while (chan->chan_thread != NULL)
160 		(void) tsleep(&chan->chan_thread, PRIBIO, "scshut", 0);
161 }
162 
163 static uint32_t
164 scsipi_chan_periph_hash(uint64_t t, uint64_t l)
165 {
166 	uint32_t hash;
167 
168 	hash = hash32_buf(&t, sizeof(t), HASH32_BUF_INIT);
169 	hash = hash32_buf(&l, sizeof(l), hash);
170 
171 	return (hash & SCSIPI_CHAN_PERIPH_HASHMASK);
172 }
173 
174 /*
175  * scsipi_insert_periph:
176  *
177  *	Insert a periph into the channel.
178  */
179 void
180 scsipi_insert_periph(struct scsipi_channel *chan, struct scsipi_periph *periph)
181 {
182 	uint32_t hash;
183 	int s;
184 
185 	hash = scsipi_chan_periph_hash(periph->periph_target,
186 	    periph->periph_lun);
187 
188 	s = splbio();
189 	LIST_INSERT_HEAD(&chan->chan_periphtab[hash], periph, periph_hash);
190 	splx(s);
191 }
192 
193 /*
194  * scsipi_remove_periph:
195  *
196  *	Remove a periph from the channel.
197  */
198 void
199 scsipi_remove_periph(struct scsipi_channel *chan, struct scsipi_periph *periph)
200 {
201 	int s;
202 
203 	s = splbio();
204 	LIST_REMOVE(periph, periph_hash);
205 	splx(s);
206 }
207 
208 /*
209  * scsipi_lookup_periph:
210  *
211  *	Lookup a periph on the specified channel.
212  */
213 struct scsipi_periph *
214 scsipi_lookup_periph(struct scsipi_channel *chan, int target, int lun)
215 {
216 	struct scsipi_periph *periph;
217 	uint32_t hash;
218 	int s;
219 
220 	if (target >= chan->chan_ntargets ||
221 	    lun >= chan->chan_nluns)
222 		return (NULL);
223 
224 	hash = scsipi_chan_periph_hash(target, lun);
225 
226 	s = splbio();
227 	LIST_FOREACH(periph, &chan->chan_periphtab[hash], periph_hash) {
228 		if (periph->periph_target == target &&
229 		    periph->periph_lun == lun)
230 			break;
231 	}
232 	splx(s);
233 
234 	return (periph);
235 }
236 
237 /*
238  * scsipi_get_resource:
239  *
240  *	Allocate a single xfer `resource' from the channel.
241  *
242  *	NOTE: Must be called at splbio().
243  */
244 static int
245 scsipi_get_resource(struct scsipi_channel *chan)
246 {
247 	struct scsipi_adapter *adapt = chan->chan_adapter;
248 
249 	if (chan->chan_flags & SCSIPI_CHAN_OPENINGS) {
250 		if (chan->chan_openings > 0) {
251 			chan->chan_openings--;
252 			return (1);
253 		}
254 		return (0);
255 	}
256 
257 	if (adapt->adapt_openings > 0) {
258 		adapt->adapt_openings--;
259 		return (1);
260 	}
261 	return (0);
262 }
263 
264 /*
265  * scsipi_grow_resources:
266  *
267  *	Attempt to grow resources for a channel.  If this succeeds,
268  *	we allocate one for our caller.
269  *
270  *	NOTE: Must be called at splbio().
271  */
272 static __inline int
273 scsipi_grow_resources(struct scsipi_channel *chan)
274 {
275 
276 	if (chan->chan_flags & SCSIPI_CHAN_CANGROW) {
277 		if ((chan->chan_flags & SCSIPI_CHAN_TACTIVE) == 0) {
278 			scsipi_adapter_request(chan,
279 			    ADAPTER_REQ_GROW_RESOURCES, NULL);
280 			return (scsipi_get_resource(chan));
281 		}
282 		/*
283 		 * ask the channel thread to do it. It'll have to thaw the
284 		 * queue
285 		 */
286 		scsipi_channel_freeze(chan, 1);
287 		chan->chan_tflags |= SCSIPI_CHANT_GROWRES;
288 		wakeup(&chan->chan_complete);
289 		return (0);
290 	}
291 
292 	return (0);
293 }
294 
295 /*
296  * scsipi_put_resource:
297  *
298  *	Free a single xfer `resource' to the channel.
299  *
300  *	NOTE: Must be called at splbio().
301  */
302 static void
303 scsipi_put_resource(struct scsipi_channel *chan)
304 {
305 	struct scsipi_adapter *adapt = chan->chan_adapter;
306 
307 	if (chan->chan_flags & SCSIPI_CHAN_OPENINGS)
308 		chan->chan_openings++;
309 	else
310 		adapt->adapt_openings++;
311 }
312 
313 /*
314  * scsipi_get_tag:
315  *
316  *	Get a tag ID for the specified xfer.
317  *
318  *	NOTE: Must be called at splbio().
319  */
320 static void
321 scsipi_get_tag(struct scsipi_xfer *xs)
322 {
323 	struct scsipi_periph *periph = xs->xs_periph;
324 	int bit, tag;
325 	u_int word;
326 
327 	bit = 0;	/* XXX gcc */
328 	for (word = 0; word < PERIPH_NTAGWORDS; word++) {
329 		bit = ffs(periph->periph_freetags[word]);
330 		if (bit != 0)
331 			break;
332 	}
333 #ifdef DIAGNOSTIC
334 	if (word == PERIPH_NTAGWORDS) {
335 		scsipi_printaddr(periph);
336 		printf("no free tags\n");
337 		panic("scsipi_get_tag");
338 	}
339 #endif
340 
341 	bit -= 1;
342 	periph->periph_freetags[word] &= ~(1 << bit);
343 	tag = (word << 5) | bit;
344 
345 	/* XXX Should eventually disallow this completely. */
346 	if (tag >= periph->periph_openings) {
347 		scsipi_printaddr(periph);
348 		printf("WARNING: tag %d greater than available openings %d\n",
349 		    tag, periph->periph_openings);
350 	}
351 
352 	xs->xs_tag_id = tag;
353 }
354 
355 /*
356  * scsipi_put_tag:
357  *
358  *	Put the tag ID for the specified xfer back into the pool.
359  *
360  *	NOTE: Must be called at splbio().
361  */
362 static void
363 scsipi_put_tag(struct scsipi_xfer *xs)
364 {
365 	struct scsipi_periph *periph = xs->xs_periph;
366 	int word, bit;
367 
368 	word = xs->xs_tag_id >> 5;
369 	bit = xs->xs_tag_id & 0x1f;
370 
371 	periph->periph_freetags[word] |= (1 << bit);
372 }
373 
374 /*
375  * scsipi_get_xs:
376  *
377  *	Allocate an xfer descriptor and associate it with the
378  *	specified peripherial.  If the peripherial has no more
379  *	available command openings, we either block waiting for
380  *	one to become available, or fail.
381  */
382 struct scsipi_xfer *
383 scsipi_get_xs(struct scsipi_periph *periph, int flags)
384 {
385 	struct scsipi_xfer *xs;
386 	int s;
387 
388 	SC_DEBUG(periph, SCSIPI_DB3, ("scsipi_get_xs\n"));
389 
390 	KASSERT(!cold);
391 
392 #ifdef DIAGNOSTIC
393 	/*
394 	 * URGENT commands can never be ASYNC.
395 	 */
396 	if ((flags & (XS_CTL_URGENT|XS_CTL_ASYNC)) ==
397 	    (XS_CTL_URGENT|XS_CTL_ASYNC)) {
398 		scsipi_printaddr(periph);
399 		printf("URGENT and ASYNC\n");
400 		panic("scsipi_get_xs");
401 	}
402 #endif
403 
404 	s = splbio();
405 	/*
406 	 * Wait for a command opening to become available.  Rules:
407 	 *
408 	 *	- All xfers must wait for an available opening.
409 	 *	  Exception: URGENT xfers can proceed when
410 	 *	  active == openings, because we use the opening
411 	 *	  of the command we're recovering for.
412 	 *	- if the periph has sense pending, only URGENT & REQSENSE
413 	 *	  xfers may proceed.
414 	 *
415 	 *	- If the periph is recovering, only URGENT xfers may
416 	 *	  proceed.
417 	 *
418 	 *	- If the periph is currently executing a recovery
419 	 *	  command, URGENT commands must block, because only
420 	 *	  one recovery command can execute at a time.
421 	 */
422 	for (;;) {
423 		if (flags & XS_CTL_URGENT) {
424 			if (periph->periph_active > periph->periph_openings)
425 				goto wait_for_opening;
426 			if (periph->periph_flags & PERIPH_SENSE) {
427 				if ((flags & XS_CTL_REQSENSE) == 0)
428 					goto wait_for_opening;
429 			} else {
430 				if ((periph->periph_flags &
431 				    PERIPH_RECOVERY_ACTIVE) != 0)
432 					goto wait_for_opening;
433 				periph->periph_flags |= PERIPH_RECOVERY_ACTIVE;
434 			}
435 			break;
436 		}
437 		if (periph->periph_active >= periph->periph_openings ||
438 		    (periph->periph_flags & PERIPH_RECOVERING) != 0)
439 			goto wait_for_opening;
440 		periph->periph_active++;
441 		break;
442 
443  wait_for_opening:
444 		if (flags & XS_CTL_NOSLEEP) {
445 			splx(s);
446 			return (NULL);
447 		}
448 		SC_DEBUG(periph, SCSIPI_DB3, ("sleeping\n"));
449 		periph->periph_flags |= PERIPH_WAITING;
450 		(void) tsleep(periph, PRIBIO, "getxs", 0);
451 	}
452 	SC_DEBUG(periph, SCSIPI_DB3, ("calling pool_get\n"));
453 	xs = pool_get(&scsipi_xfer_pool,
454 	    ((flags & XS_CTL_NOSLEEP) != 0 ? PR_NOWAIT : PR_WAITOK));
455 	if (xs == NULL) {
456 		if (flags & XS_CTL_URGENT) {
457 			if ((flags & XS_CTL_REQSENSE) == 0)
458 				periph->periph_flags &= ~PERIPH_RECOVERY_ACTIVE;
459 		} else
460 			periph->periph_active--;
461 		scsipi_printaddr(periph);
462 		printf("unable to allocate %sscsipi_xfer\n",
463 		    (flags & XS_CTL_URGENT) ? "URGENT " : "");
464 	}
465 	splx(s);
466 
467 	SC_DEBUG(periph, SCSIPI_DB3, ("returning\n"));
468 
469 	if (xs != NULL) {
470 		memset(xs, 0, sizeof(*xs));
471 		callout_init(&xs->xs_callout);
472 		xs->xs_periph = periph;
473 		xs->xs_control = flags;
474 		xs->xs_status = 0;
475 		s = splbio();
476 		TAILQ_INSERT_TAIL(&periph->periph_xferq, xs, device_q);
477 		splx(s);
478 	}
479 	return (xs);
480 }
481 
482 /*
483  * scsipi_put_xs:
484  *
485  *	Release an xfer descriptor, decreasing the outstanding command
486  *	count for the peripherial.  If there is a thread waiting for
487  *	an opening, wake it up.  If not, kick any queued I/O the
488  *	peripherial may have.
489  *
490  *	NOTE: Must be called at splbio().
491  */
492 void
493 scsipi_put_xs(struct scsipi_xfer *xs)
494 {
495 	struct scsipi_periph *periph = xs->xs_periph;
496 	int flags = xs->xs_control;
497 
498 	SC_DEBUG(periph, SCSIPI_DB3, ("scsipi_free_xs\n"));
499 
500 	TAILQ_REMOVE(&periph->periph_xferq, xs, device_q);
501 	pool_put(&scsipi_xfer_pool, xs);
502 
503 #ifdef DIAGNOSTIC
504 	if ((periph->periph_flags & PERIPH_RECOVERY_ACTIVE) != 0 &&
505 	    periph->periph_active == 0) {
506 		scsipi_printaddr(periph);
507 		printf("recovery without a command to recovery for\n");
508 		panic("scsipi_put_xs");
509 	}
510 #endif
511 
512 	if (flags & XS_CTL_URGENT) {
513 		if ((flags & XS_CTL_REQSENSE) == 0)
514 			periph->periph_flags &= ~PERIPH_RECOVERY_ACTIVE;
515 	} else
516 		periph->periph_active--;
517 	if (periph->periph_active == 0 &&
518 	    (periph->periph_flags & PERIPH_WAITDRAIN) != 0) {
519 		periph->periph_flags &= ~PERIPH_WAITDRAIN;
520 		wakeup(&periph->periph_active);
521 	}
522 
523 	if (periph->periph_flags & PERIPH_WAITING) {
524 		periph->periph_flags &= ~PERIPH_WAITING;
525 		wakeup(periph);
526 	} else {
527 		if (periph->periph_switch->psw_start != NULL &&
528 		    (periph->periph_dev->dv_flags & DVF_ACTIVE)) {
529 			SC_DEBUG(periph, SCSIPI_DB2,
530 			    ("calling private start()\n"));
531 			(*periph->periph_switch->psw_start)(periph);
532 		}
533 	}
534 }
535 
536 /*
537  * scsipi_channel_freeze:
538  *
539  *	Freeze a channel's xfer queue.
540  */
541 void
542 scsipi_channel_freeze(struct scsipi_channel *chan, int count)
543 {
544 	int s;
545 
546 	s = splbio();
547 	chan->chan_qfreeze += count;
548 	splx(s);
549 }
550 
551 /*
552  * scsipi_channel_thaw:
553  *
554  *	Thaw a channel's xfer queue.
555  */
556 void
557 scsipi_channel_thaw(struct scsipi_channel *chan, int count)
558 {
559 	int s;
560 
561 	s = splbio();
562 	chan->chan_qfreeze -= count;
563 	/*
564 	 * Don't let the freeze count go negative.
565 	 *
566 	 * Presumably the adapter driver could keep track of this,
567 	 * but it might just be easier to do this here so as to allow
568 	 * multiple callers, including those outside the adapter driver.
569 	 */
570 	if (chan->chan_qfreeze < 0) {
571 		chan->chan_qfreeze = 0;
572 	}
573 	splx(s);
574 	/*
575 	 * Kick the channel's queue here.  Note, we may be running in
576 	 * interrupt context (softclock or HBA's interrupt), so the adapter
577 	 * driver had better not sleep.
578 	 */
579 	if (chan->chan_qfreeze == 0)
580 		scsipi_run_queue(chan);
581 }
582 
583 /*
584  * scsipi_channel_timed_thaw:
585  *
586  *	Thaw a channel after some time has expired. This will also
587  * 	run the channel's queue if the freeze count has reached 0.
588  */
589 void
590 scsipi_channel_timed_thaw(void *arg)
591 {
592 	struct scsipi_channel *chan = arg;
593 
594 	scsipi_channel_thaw(chan, 1);
595 }
596 
597 /*
598  * scsipi_periph_freeze:
599  *
600  *	Freeze a device's xfer queue.
601  */
602 void
603 scsipi_periph_freeze(struct scsipi_periph *periph, int count)
604 {
605 	int s;
606 
607 	s = splbio();
608 	periph->periph_qfreeze += count;
609 	splx(s);
610 }
611 
612 /*
613  * scsipi_periph_thaw:
614  *
615  *	Thaw a device's xfer queue.
616  */
617 void
618 scsipi_periph_thaw(struct scsipi_periph *periph, int count)
619 {
620 	int s;
621 
622 	s = splbio();
623 	periph->periph_qfreeze -= count;
624 #ifdef DIAGNOSTIC
625 	if (periph->periph_qfreeze < 0) {
626 		static const char pc[] = "periph freeze count < 0";
627 		scsipi_printaddr(periph);
628 		printf("%s\n", pc);
629 		panic(pc);
630 	}
631 #endif
632 	if (periph->periph_qfreeze == 0 &&
633 	    (periph->periph_flags & PERIPH_WAITING) != 0)
634 		wakeup(periph);
635 	splx(s);
636 }
637 
638 /*
639  * scsipi_periph_timed_thaw:
640  *
641  *	Thaw a device after some time has expired.
642  */
643 void
644 scsipi_periph_timed_thaw(void *arg)
645 {
646 	int s;
647 	struct scsipi_periph *periph = arg;
648 
649 	callout_stop(&periph->periph_callout);
650 
651 	s = splbio();
652 	scsipi_periph_thaw(periph, 1);
653 	if ((periph->periph_channel->chan_flags & SCSIPI_CHAN_TACTIVE) == 0) {
654 		/*
655 		 * Kick the channel's queue here.  Note, we're running in
656 		 * interrupt context (softclock), so the adapter driver
657 		 * had better not sleep.
658 		 */
659 		scsipi_run_queue(periph->periph_channel);
660 	} else {
661 		/*
662 		 * Tell the completion thread to kick the channel's queue here.
663 		 */
664 		periph->periph_channel->chan_tflags |= SCSIPI_CHANT_KICK;
665 		wakeup(&periph->periph_channel->chan_complete);
666 	}
667 	splx(s);
668 }
669 
670 /*
671  * scsipi_wait_drain:
672  *
673  *	Wait for a periph's pending xfers to drain.
674  */
675 void
676 scsipi_wait_drain(struct scsipi_periph *periph)
677 {
678 	int s;
679 
680 	s = splbio();
681 	while (periph->periph_active != 0) {
682 		periph->periph_flags |= PERIPH_WAITDRAIN;
683 		(void) tsleep(&periph->periph_active, PRIBIO, "sxdrn", 0);
684 	}
685 	splx(s);
686 }
687 
688 /*
689  * scsipi_kill_pending:
690  *
691  *	Kill off all pending xfers for a periph.
692  *
693  *	NOTE: Must be called at splbio().
694  */
695 void
696 scsipi_kill_pending(struct scsipi_periph *periph)
697 {
698 
699 	(*periph->periph_channel->chan_bustype->bustype_kill_pending)(periph);
700 	scsipi_wait_drain(periph);
701 }
702 
703 /*
704  * scsipi_print_cdb:
705  * prints a command descriptor block (for debug purpose, error messages,
706  * SCSIPI_VERBOSE, ...)
707  */
708 void
709 scsipi_print_cdb(struct scsipi_generic *cmd)
710 {
711 	int i, j;
712 
713  	printf("0x%02x", cmd->opcode);
714 
715  	switch (CDB_GROUPID(cmd->opcode)) {
716  	case CDB_GROUPID_0:
717  		j = CDB_GROUP0;
718  		break;
719  	case CDB_GROUPID_1:
720  		j = CDB_GROUP1;
721  		break;
722  	case CDB_GROUPID_2:
723  		j = CDB_GROUP2;
724  		break;
725  	case CDB_GROUPID_3:
726  		j = CDB_GROUP3;
727  		break;
728  	case CDB_GROUPID_4:
729  		j = CDB_GROUP4;
730  		break;
731  	case CDB_GROUPID_5:
732  		j = CDB_GROUP5;
733  		break;
734  	case CDB_GROUPID_6:
735  		j = CDB_GROUP6;
736  		break;
737  	case CDB_GROUPID_7:
738  		j = CDB_GROUP7;
739  		break;
740  	default:
741  		j = 0;
742  	}
743  	if (j == 0)
744  		j = sizeof (cmd->bytes);
745  	for (i = 0; i < j-1; i++) /* already done the opcode */
746  		printf(" %02x", cmd->bytes[i]);
747 }
748 
749 /*
750  * scsipi_interpret_sense:
751  *
752  *	Look at the returned sense and act on the error, determining
753  *	the unix error number to pass back.  (0 = report no error)
754  *
755  *	NOTE: If we return ERESTART, we are expected to haved
756  *	thawed the device!
757  *
758  *	THIS IS THE DEFAULT ERROR HANDLER FOR SCSI DEVICES.
759  */
760 int
761 scsipi_interpret_sense(struct scsipi_xfer *xs)
762 {
763 	struct scsi_sense_data *sense;
764 	struct scsipi_periph *periph = xs->xs_periph;
765 	u_int8_t key;
766 	int error;
767 #ifndef	SCSIVERBOSE
768 	u_int32_t info;
769 	static const char *error_mes[] = {
770 		"soft error (corrected)",
771 		"not ready", "medium error",
772 		"non-media hardware failure", "illegal request",
773 		"unit attention", "readonly device",
774 		"no data found", "vendor unique",
775 		"copy aborted", "command aborted",
776 		"search returned equal", "volume overflow",
777 		"verify miscompare", "unknown error key"
778 	};
779 #endif
780 
781 	sense = &xs->sense.scsi_sense;
782 #ifdef SCSIPI_DEBUG
783 	if (periph->periph_flags & SCSIPI_DB1) {
784 		int count;
785 		scsipi_printaddr(periph);
786 		printf(" sense debug information:\n");
787 		printf("\tcode 0x%x valid %d\n",
788 			SSD_RCODE(sense->response_code),
789 			sense->response_code & SSD_RCODE_VALID ? 1 : 0);
790 		printf("\tseg 0x%x key 0x%x ili 0x%x eom 0x%x fmark 0x%x\n",
791 			sense->segment,
792 			SSD_SENSE_KEY(sense->flags),
793 			sense->flags & SSD_ILI ? 1 : 0,
794 			sense->flags & SSD_EOM ? 1 : 0,
795 			sense->flags & SSD_FILEMARK ? 1 : 0);
796 		printf("\ninfo: 0x%x 0x%x 0x%x 0x%x followed by %d "
797 			"extra bytes\n",
798 			sense->info[0],
799 			sense->info[1],
800 			sense->info[2],
801 			sense->info[3],
802 			sense->extra_len);
803 		printf("\textra: ");
804 		for (count = 0; count < SSD_ADD_BYTES_LIM(sense); count++)
805 			printf("0x%x ", sense->csi[count]);
806 		printf("\n");
807 	}
808 #endif
809 
810 	/*
811 	 * If the periph has it's own error handler, call it first.
812 	 * If it returns a legit error value, return that, otherwise
813 	 * it wants us to continue with normal error processing.
814 	 */
815 	if (periph->periph_switch->psw_error != NULL) {
816 		SC_DEBUG(periph, SCSIPI_DB2,
817 		    ("calling private err_handler()\n"));
818 		error = (*periph->periph_switch->psw_error)(xs);
819 		if (error != EJUSTRETURN)
820 			return (error);
821 	}
822 	/* otherwise use the default */
823 	switch (SSD_RCODE(sense->response_code)) {
824 
825 		/*
826 		 * Old SCSI-1 and SASI devices respond with
827 		 * codes other than 70.
828 		 */
829 	case 0x00:		/* no error (command completed OK) */
830 		return (0);
831 	case 0x04:		/* drive not ready after it was selected */
832 		if ((periph->periph_flags & PERIPH_REMOVABLE) != 0)
833 			periph->periph_flags &= ~PERIPH_MEDIA_LOADED;
834 		if ((xs->xs_control & XS_CTL_IGNORE_NOT_READY) != 0)
835 			return (0);
836 		/* XXX - display some sort of error here? */
837 		return (EIO);
838 	case 0x20:		/* invalid command */
839 		if ((xs->xs_control &
840 		     XS_CTL_IGNORE_ILLEGAL_REQUEST) != 0)
841 			return (0);
842 		return (EINVAL);
843 	case 0x25:		/* invalid LUN (Adaptec ACB-4000) */
844 		return (EACCES);
845 
846 		/*
847 		 * If it's code 70, use the extended stuff and
848 		 * interpret the key
849 		 */
850 	case 0x71:		/* delayed error */
851 		scsipi_printaddr(periph);
852 		key = SSD_SENSE_KEY(sense->flags);
853 		printf(" DEFERRED ERROR, key = 0x%x\n", key);
854 		/* FALLTHROUGH */
855 	case 0x70:
856 #ifndef	SCSIVERBOSE
857 		if ((sense->response_code & SSD_RCODE_VALID) != 0)
858 			info = _4btol(sense->info);
859 		else
860 			info = 0;
861 #endif
862 		key = SSD_SENSE_KEY(sense->flags);
863 
864 		switch (key) {
865 		case SKEY_NO_SENSE:
866 		case SKEY_RECOVERED_ERROR:
867 			if (xs->resid == xs->datalen && xs->datalen) {
868 				/*
869 				 * Why is this here?
870 				 */
871 				xs->resid = 0;	/* not short read */
872 			}
873 		case SKEY_EQUAL:
874 			error = 0;
875 			break;
876 		case SKEY_NOT_READY:
877 			if ((periph->periph_flags & PERIPH_REMOVABLE) != 0)
878 				periph->periph_flags &= ~PERIPH_MEDIA_LOADED;
879 			if ((xs->xs_control & XS_CTL_IGNORE_NOT_READY) != 0)
880 				return (0);
881 			if (sense->asc == 0x3A) {
882 				error = ENODEV; /* Medium not present */
883 				if (xs->xs_control & XS_CTL_SILENT_NODEV)
884 					return (error);
885 			} else
886 				error = EIO;
887 			if ((xs->xs_control & XS_CTL_SILENT) != 0)
888 				return (error);
889 			break;
890 		case SKEY_ILLEGAL_REQUEST:
891 			if ((xs->xs_control &
892 			     XS_CTL_IGNORE_ILLEGAL_REQUEST) != 0)
893 				return (0);
894 			/*
895 			 * Handle the case where a device reports
896 			 * Logical Unit Not Supported during discovery.
897 			 */
898 			if ((xs->xs_control & XS_CTL_DISCOVERY) != 0 &&
899 			    sense->asc == 0x25 &&
900 			    sense->ascq == 0x00)
901 				return (EINVAL);
902 			if ((xs->xs_control & XS_CTL_SILENT) != 0)
903 				return (EIO);
904 			error = EINVAL;
905 			break;
906 		case SKEY_UNIT_ATTENTION:
907 			if (sense->asc == 0x29 &&
908 			    sense->ascq == 0x00) {
909 				/* device or bus reset */
910 				return (ERESTART);
911 			}
912 			if ((periph->periph_flags & PERIPH_REMOVABLE) != 0)
913 				periph->periph_flags &= ~PERIPH_MEDIA_LOADED;
914 			if ((xs->xs_control &
915 			     XS_CTL_IGNORE_MEDIA_CHANGE) != 0 ||
916 				/* XXX Should reupload any transient state. */
917 				(periph->periph_flags &
918 				 PERIPH_REMOVABLE) == 0) {
919 				return (ERESTART);
920 			}
921 			if ((xs->xs_control & XS_CTL_SILENT) != 0)
922 				return (EIO);
923 			error = EIO;
924 			break;
925 		case SKEY_DATA_PROTECT:
926 			error = EROFS;
927 			break;
928 		case SKEY_BLANK_CHECK:
929 			error = 0;
930 			break;
931 		case SKEY_ABORTED_COMMAND:
932 			if (xs->xs_retries != 0) {
933 				xs->xs_retries--;
934 				error = ERESTART;
935 			} else
936 				error = EIO;
937 			break;
938 		case SKEY_VOLUME_OVERFLOW:
939 			error = ENOSPC;
940 			break;
941 		default:
942 			error = EIO;
943 			break;
944 		}
945 
946 #ifdef SCSIVERBOSE
947 		if (key && (xs->xs_control & XS_CTL_SILENT) == 0)
948 			scsipi_print_sense(xs, 0);
949 #else
950 		if (key) {
951 			scsipi_printaddr(periph);
952 			printf("%s", error_mes[key - 1]);
953 			if ((sense->response_code & SSD_RCODE_VALID) != 0) {
954 				switch (key) {
955 				case SKEY_NOT_READY:
956 				case SKEY_ILLEGAL_REQUEST:
957 				case SKEY_UNIT_ATTENTION:
958 				case SKEY_DATA_PROTECT:
959 					break;
960 				case SKEY_BLANK_CHECK:
961 					printf(", requested size: %d (decimal)",
962 					    info);
963 					break;
964 				case SKEY_ABORTED_COMMAND:
965 					if (xs->xs_retries)
966 						printf(", retrying");
967 					printf(", cmd 0x%x, info 0x%x",
968 					    xs->cmd->opcode, info);
969 					break;
970 				default:
971 					printf(", info = %d (decimal)", info);
972 				}
973 			}
974 			if (sense->extra_len != 0) {
975 				int n;
976 				printf(", data =");
977 				for (n = 0; n < sense->extra_len; n++)
978 					printf(" %02x",
979 					    sense->csi[n]);
980 			}
981 			printf("\n");
982 		}
983 #endif
984 		return (error);
985 
986 	/*
987 	 * Some other code, just report it
988 	 */
989 	default:
990 #if    defined(SCSIDEBUG) || defined(DEBUG)
991 	{
992 		static const char *uc = "undecodable sense error";
993 		int i;
994 		u_int8_t *cptr = (u_int8_t *) sense;
995 		scsipi_printaddr(periph);
996 		if (xs->cmd == &xs->cmdstore) {
997 			printf("%s for opcode 0x%x, data=",
998 			    uc, xs->cmdstore.opcode);
999 		} else {
1000 			printf("%s, data=", uc);
1001 		}
1002 		for (i = 0; i < sizeof (sense); i++)
1003 			printf(" 0x%02x", *(cptr++) & 0xff);
1004 		printf("\n");
1005 	}
1006 #else
1007 		scsipi_printaddr(periph);
1008 		printf("Sense Error Code 0x%x",
1009 			SSD_RCODE(sense->response_code));
1010 		if ((sense->response_code & SSD_RCODE_VALID) != 0) {
1011 			struct scsi_sense_data_unextended *usense =
1012 			    (struct scsi_sense_data_unextended *)sense;
1013 			printf(" at block no. %d (decimal)",
1014 			    _3btol(usense->block));
1015 		}
1016 		printf("\n");
1017 #endif
1018 		return (EIO);
1019 	}
1020 }
1021 
1022 /*
1023  * scsipi_size:
1024  *
1025  *	Find out from the device what its capacity is.
1026  */
1027 u_int64_t
1028 scsipi_size(struct scsipi_periph *periph, int flags)
1029 {
1030 	union {
1031 		struct scsipi_read_capacity_10 cmd;
1032 		struct scsipi_read_capacity_16 cmd16;
1033 	} cmd;
1034 	union {
1035 		struct scsipi_read_capacity_10_data data;
1036 		struct scsipi_read_capacity_16_data data16;
1037 	} data;
1038 
1039 	memset(&cmd, 0, sizeof(cmd));
1040 	cmd.cmd.opcode = READ_CAPACITY_10;
1041 
1042 	/*
1043 	 * If the command works, interpret the result as a 4 byte
1044 	 * number of blocks
1045 	 */
1046 	if (scsipi_command(periph, (void *)&cmd.cmd, sizeof(cmd.cmd),
1047 	    (void *)&data.data, sizeof(data.data), SCSIPIRETRIES, 20000, NULL,
1048 	    flags | XS_CTL_DATA_IN | XS_CTL_DATA_ONSTACK | XS_CTL_SILENT) != 0)
1049 		return (0);
1050 
1051 	if (_4btol(data.data.addr) != 0xffffffff)
1052 		return (_4btol(data.data.addr) + 1);
1053 
1054 	/*
1055 	 * Device is larger than can be reflected by READ CAPACITY (10).
1056 	 * Try READ CAPACITY (16).
1057 	 */
1058 
1059 	memset(&cmd, 0, sizeof(cmd));
1060 	cmd.cmd16.opcode = READ_CAPACITY_16;
1061 	cmd.cmd16.byte2 = SRC16_SERVICE_ACTION;
1062 	_lto4b(sizeof(data.data16), cmd.cmd16.len);
1063 
1064 	if (scsipi_command(periph, (void *)&cmd.cmd16, sizeof(cmd.cmd16),
1065 	    (void *)&data.data16, sizeof(data.data16), SCSIPIRETRIES, 20000,
1066 	    NULL,
1067 	    flags | XS_CTL_DATA_IN | XS_CTL_DATA_ONSTACK | XS_CTL_SILENT) != 0)
1068 		return (0);
1069 
1070 	return (_8btol(data.data16.addr) + 1);
1071 }
1072 
1073 /*
1074  * scsipi_test_unit_ready:
1075  *
1076  *	Issue a `test unit ready' request.
1077  */
1078 int
1079 scsipi_test_unit_ready(struct scsipi_periph *periph, int flags)
1080 {
1081 	struct scsi_test_unit_ready cmd;
1082 	int retries;
1083 
1084 	/* some ATAPI drives don't support TEST UNIT READY. Sigh */
1085 	if (periph->periph_quirks & PQUIRK_NOTUR)
1086 		return (0);
1087 
1088 	if (flags & XS_CTL_DISCOVERY)
1089 		retries = 0;
1090 	else
1091 		retries = SCSIPIRETRIES;
1092 
1093 	memset(&cmd, 0, sizeof(cmd));
1094 	cmd.opcode = SCSI_TEST_UNIT_READY;
1095 
1096 	return (scsipi_command(periph, (void *)&cmd, sizeof(cmd), 0, 0,
1097 	    retries, 10000, NULL, flags));
1098 }
1099 
1100 /*
1101  * scsipi_inquire:
1102  *
1103  *	Ask the device about itself.
1104  */
1105 int
1106 scsipi_inquire(struct scsipi_periph *periph, struct scsipi_inquiry_data *inqbuf,
1107     int flags)
1108 {
1109 	struct scsipi_inquiry cmd;
1110 	int error;
1111 	int retries;
1112 
1113 	if (flags & XS_CTL_DISCOVERY)
1114 		retries = 0;
1115 	else
1116 		retries = SCSIPIRETRIES;
1117 
1118 	/*
1119 	 * If we request more data than the device can provide, it SHOULD just
1120 	 * return a short reponse.  However, some devices error with an
1121 	 * ILLEGAL REQUEST sense code, and yet others have even more special
1122 	 * failture modes (such as the GL641USB flash adapter, which goes loony
1123 	 * and sends corrupted CRCs).  To work around this, and to bring our
1124 	 * behavior more in line with other OSes, we do a shorter inquiry,
1125 	 * covering all the SCSI-2 information, first, and then request more
1126 	 * data iff the "additional length" field indicates there is more.
1127 	 * - mycroft, 2003/10/16
1128 	 */
1129 	memset(&cmd, 0, sizeof(cmd));
1130 	cmd.opcode = INQUIRY;
1131 	cmd.length = SCSIPI_INQUIRY_LENGTH_SCSI2;
1132 	error = scsipi_command(periph, (void *)&cmd, sizeof(cmd),
1133 	    (void *)inqbuf, SCSIPI_INQUIRY_LENGTH_SCSI2, retries,
1134 	    10000, NULL, flags | XS_CTL_DATA_IN);
1135 	if (!error &&
1136 	    inqbuf->additional_length > SCSIPI_INQUIRY_LENGTH_SCSI2 - 4) {
1137 #if 0
1138 printf("inquire: addlen=%d, retrying\n", inqbuf->additional_length);
1139 #endif
1140 		cmd.length = SCSIPI_INQUIRY_LENGTH_SCSI3;
1141 		error = scsipi_command(periph, (void *)&cmd, sizeof(cmd),
1142 		    (void *)inqbuf, SCSIPI_INQUIRY_LENGTH_SCSI3, retries,
1143 		    10000, NULL, flags | XS_CTL_DATA_IN);
1144 #if 0
1145 printf("inquire: error=%d\n", error);
1146 #endif
1147 	}
1148 
1149 #ifdef SCSI_OLD_NOINQUIRY
1150 	/*
1151 	 * Kludge for the Adaptec ACB-4000 SCSI->MFM translator.
1152 	 * This board doesn't support the INQUIRY command at all.
1153 	 */
1154 	if (error == EINVAL || error == EACCES) {
1155 		/*
1156 		 * Conjure up an INQUIRY response.
1157 		 */
1158 		inqbuf->device = (error == EINVAL ?
1159 			 SID_QUAL_LU_PRESENT :
1160 			 SID_QUAL_LU_NOTPRESENT) | T_DIRECT;
1161 		inqbuf->dev_qual2 = 0;
1162 		inqbuf->version = 0;
1163 		inqbuf->response_format = SID_FORMAT_SCSI1;
1164 		inqbuf->additional_length = SCSIPI_INQUIRY_LENGTH_SCSI2 - 4;
1165 		inqbuf->flags1 = inqbuf->flags2 = inqbuf->flags3 = 0;
1166 		memcpy(inqbuf->vendor, "ADAPTEC ACB-4000            ", 28);
1167 		error = 0;
1168 	}
1169 
1170 	/*
1171 	 * Kludge for the Emulex MT-02 SCSI->QIC translator.
1172 	 * This board gives an empty response to an INQUIRY command.
1173 	 */
1174 	else if (error == 0 &&
1175 	    inqbuf->device == (SID_QUAL_LU_PRESENT | T_DIRECT) &&
1176 	    inqbuf->dev_qual2 == 0 &&
1177 	    inqbuf->version == 0 &&
1178 	    inqbuf->response_format == SID_FORMAT_SCSI1) {
1179 		/*
1180 		 * Fill out the INQUIRY response.
1181 		 */
1182 		inqbuf->device = (SID_QUAL_LU_PRESENT | T_SEQUENTIAL);
1183 		inqbuf->dev_qual2 = SID_REMOVABLE;
1184 		inqbuf->additional_length = SCSIPI_INQUIRY_LENGTH_SCSI2 - 4;
1185 		inqbuf->flags1 = inqbuf->flags2 = inqbuf->flags3 = 0;
1186 		memcpy(inqbuf->vendor, "EMULEX  MT-02 QIC           ", 28);
1187 	}
1188 #endif /* SCSI_OLD_NOINQUIRY */
1189 
1190 	return error;
1191 }
1192 
1193 /*
1194  * scsipi_prevent:
1195  *
1196  *	Prevent or allow the user to remove the media
1197  */
1198 int
1199 scsipi_prevent(struct scsipi_periph *periph, int type, int flags)
1200 {
1201 	struct scsi_prevent_allow_medium_removal cmd;
1202 
1203 	memset(&cmd, 0, sizeof(cmd));
1204 	cmd.opcode = SCSI_PREVENT_ALLOW_MEDIUM_REMOVAL;
1205 	cmd.how = type;
1206 
1207 	return (scsipi_command(periph, (void *)&cmd, sizeof(cmd), 0, 0,
1208 	    SCSIPIRETRIES, 5000, NULL, flags));
1209 }
1210 
1211 /*
1212  * scsipi_start:
1213  *
1214  *	Send a START UNIT.
1215  */
1216 int
1217 scsipi_start(struct scsipi_periph *periph, int type, int flags)
1218 {
1219 	struct scsipi_start_stop cmd;
1220 
1221 	memset(&cmd, 0, sizeof(cmd));
1222 	cmd.opcode = START_STOP;
1223 	cmd.byte2 = 0x00;
1224 	cmd.how = type;
1225 
1226 	return (scsipi_command(periph, (void *)&cmd, sizeof(cmd), 0, 0,
1227 	    SCSIPIRETRIES, (type & SSS_START) ? 60000 : 10000, NULL, flags));
1228 }
1229 
1230 /*
1231  * scsipi_mode_sense, scsipi_mode_sense_big:
1232  *	get a sense page from a device
1233  */
1234 
1235 int
1236 scsipi_mode_sense(struct scsipi_periph *periph, int byte2, int page,
1237     struct scsi_mode_parameter_header_6 *data, int len, int flags, int retries,
1238     int timeout)
1239 {
1240 	struct scsi_mode_sense_6 cmd;
1241 
1242 	memset(&cmd, 0, sizeof(cmd));
1243 	cmd.opcode = SCSI_MODE_SENSE_6;
1244 	cmd.byte2 = byte2;
1245 	cmd.page = page;
1246 	cmd.length = len & 0xff;
1247 
1248 	return (scsipi_command(periph, (void *)&cmd, sizeof(cmd),
1249 	    (void *)data, len, retries, timeout, NULL, flags | XS_CTL_DATA_IN));
1250 }
1251 
1252 int
1253 scsipi_mode_sense_big(struct scsipi_periph *periph, int byte2, int page,
1254     struct scsi_mode_parameter_header_10 *data, int len, int flags, int retries,
1255     int timeout)
1256 {
1257 	struct scsi_mode_sense_10 cmd;
1258 
1259 	memset(&cmd, 0, sizeof(cmd));
1260 	cmd.opcode = SCSI_MODE_SENSE_10;
1261 	cmd.byte2 = byte2;
1262 	cmd.page = page;
1263 	_lto2b(len, cmd.length);
1264 
1265 	return (scsipi_command(periph, (void *)&cmd, sizeof(cmd),
1266 	    (void *)data, len, retries, timeout, NULL, flags | XS_CTL_DATA_IN));
1267 }
1268 
1269 int
1270 scsipi_mode_select(struct scsipi_periph *periph, int byte2,
1271     struct scsi_mode_parameter_header_6 *data, int len, int flags, int retries,
1272     int timeout)
1273 {
1274 	struct scsi_mode_select_6 cmd;
1275 
1276 	memset(&cmd, 0, sizeof(cmd));
1277 	cmd.opcode = SCSI_MODE_SELECT_6;
1278 	cmd.byte2 = byte2;
1279 	cmd.length = len & 0xff;
1280 
1281 	return (scsipi_command(periph, (void *)&cmd, sizeof(cmd),
1282 	    (void *)data, len, retries, timeout, NULL, flags | XS_CTL_DATA_OUT));
1283 }
1284 
1285 int
1286 scsipi_mode_select_big(struct scsipi_periph *periph, int byte2,
1287     struct scsi_mode_parameter_header_10 *data, int len, int flags, int retries,
1288     int timeout)
1289 {
1290 	struct scsi_mode_select_10 cmd;
1291 
1292 	memset(&cmd, 0, sizeof(cmd));
1293 	cmd.opcode = SCSI_MODE_SELECT_10;
1294 	cmd.byte2 = byte2;
1295 	_lto2b(len, cmd.length);
1296 
1297 	return (scsipi_command(periph, (void *)&cmd, sizeof(cmd),
1298 	    (void *)data, len, retries, timeout, NULL, flags | XS_CTL_DATA_OUT));
1299 }
1300 
1301 /*
1302  * scsipi_done:
1303  *
1304  *	This routine is called by an adapter's interrupt handler when
1305  *	an xfer is completed.
1306  */
1307 void
1308 scsipi_done(struct scsipi_xfer *xs)
1309 {
1310 	struct scsipi_periph *periph = xs->xs_periph;
1311 	struct scsipi_channel *chan = periph->periph_channel;
1312 	int s, freezecnt;
1313 
1314 	SC_DEBUG(periph, SCSIPI_DB2, ("scsipi_done\n"));
1315 #ifdef SCSIPI_DEBUG
1316 	if (periph->periph_dbflags & SCSIPI_DB1)
1317 		show_scsipi_cmd(xs);
1318 #endif
1319 
1320 	s = splbio();
1321 	/*
1322 	 * The resource this command was using is now free.
1323 	 */
1324 	scsipi_put_resource(chan);
1325 	xs->xs_periph->periph_sent--;
1326 
1327 	/*
1328 	 * If the command was tagged, free the tag.
1329 	 */
1330 	if (XS_CTL_TAGTYPE(xs) != 0)
1331 		scsipi_put_tag(xs);
1332 	else
1333 		periph->periph_flags &= ~PERIPH_UNTAG;
1334 
1335 	/* Mark the command as `done'. */
1336 	xs->xs_status |= XS_STS_DONE;
1337 
1338 #ifdef DIAGNOSTIC
1339 	if ((xs->xs_control & (XS_CTL_ASYNC|XS_CTL_POLL)) ==
1340 	    (XS_CTL_ASYNC|XS_CTL_POLL))
1341 		panic("scsipi_done: ASYNC and POLL");
1342 #endif
1343 
1344 	/*
1345 	 * If the xfer had an error of any sort, freeze the
1346 	 * periph's queue.  Freeze it again if we were requested
1347 	 * to do so in the xfer.
1348 	 */
1349 	freezecnt = 0;
1350 	if (xs->error != XS_NOERROR)
1351 		freezecnt++;
1352 	if (xs->xs_control & XS_CTL_FREEZE_PERIPH)
1353 		freezecnt++;
1354 	if (freezecnt != 0)
1355 		scsipi_periph_freeze(periph, freezecnt);
1356 
1357 	/*
1358 	 * record the xfer with a pending sense, in case a SCSI reset is
1359 	 * received before the thread is waked up.
1360 	 */
1361 	if (xs->error == XS_BUSY && xs->status == SCSI_CHECK) {
1362 		periph->periph_flags |= PERIPH_SENSE;
1363 		periph->periph_xscheck = xs;
1364 	}
1365 
1366 	/*
1367 	 * If this was an xfer that was not to complete asynchronously,
1368 	 * let the requesting thread perform error checking/handling
1369 	 * in its context.
1370 	 */
1371 	if ((xs->xs_control & XS_CTL_ASYNC) == 0) {
1372 		splx(s);
1373 		/*
1374 		 * If it's a polling job, just return, to unwind the
1375 		 * call graph.  We don't need to restart the queue,
1376 		 * because pollings jobs are treated specially, and
1377 		 * are really only used during crash dumps anyway
1378 		 * (XXX or during boot-time autconfiguration of
1379 		 * ATAPI devices).
1380 		 */
1381 		if (xs->xs_control & XS_CTL_POLL)
1382 			return;
1383 		wakeup(xs);
1384 		goto out;
1385 	}
1386 
1387 	/*
1388 	 * Catch the extremely common case of I/O completing
1389 	 * without error; no use in taking a context switch
1390 	 * if we can handle it in interrupt context.
1391 	 */
1392 	if (xs->error == XS_NOERROR) {
1393 		splx(s);
1394 		(void) scsipi_complete(xs);
1395 		goto out;
1396 	}
1397 
1398 	/*
1399 	 * There is an error on this xfer.  Put it on the channel's
1400 	 * completion queue, and wake up the completion thread.
1401 	 */
1402 	TAILQ_INSERT_TAIL(&chan->chan_complete, xs, channel_q);
1403 	splx(s);
1404 	wakeup(&chan->chan_complete);
1405 
1406  out:
1407 	/*
1408 	 * If there are more xfers on the channel's queue, attempt to
1409 	 * run them.
1410 	 */
1411 	scsipi_run_queue(chan);
1412 }
1413 
1414 /*
1415  * scsipi_complete:
1416  *
1417  *	Completion of a scsipi_xfer.  This is the guts of scsipi_done().
1418  *
1419  *	NOTE: This routine MUST be called with valid thread context
1420  *	except for the case where the following two conditions are
1421  *	true:
1422  *
1423  *		xs->error == XS_NOERROR
1424  *		XS_CTL_ASYNC is set in xs->xs_control
1425  *
1426  *	The semantics of this routine can be tricky, so here is an
1427  *	explanation:
1428  *
1429  *		0		Xfer completed successfully.
1430  *
1431  *		ERESTART	Xfer had an error, but was restarted.
1432  *
1433  *		anything else	Xfer had an error, return value is Unix
1434  *				errno.
1435  *
1436  *	If the return value is anything but ERESTART:
1437  *
1438  *		- If XS_CTL_ASYNC is set, `xs' has been freed back to
1439  *		  the pool.
1440  *		- If there is a buf associated with the xfer,
1441  *		  it has been biodone()'d.
1442  */
1443 static int
1444 scsipi_complete(struct scsipi_xfer *xs)
1445 {
1446 	struct scsipi_periph *periph = xs->xs_periph;
1447 	struct scsipi_channel *chan = periph->periph_channel;
1448 	int error, s;
1449 
1450 #ifdef DIAGNOSTIC
1451 	if ((xs->xs_control & XS_CTL_ASYNC) != 0 && xs->bp == NULL)
1452 		panic("scsipi_complete: XS_CTL_ASYNC but no buf");
1453 #endif
1454 	/*
1455 	 * If command terminated with a CHECK CONDITION, we need to issue a
1456 	 * REQUEST_SENSE command. Once the REQUEST_SENSE has been processed
1457 	 * we'll have the real status.
1458 	 * Must be processed at splbio() to avoid missing a SCSI bus reset
1459 	 * for this command.
1460 	 */
1461 	s = splbio();
1462 	if (xs->error == XS_BUSY && xs->status == SCSI_CHECK) {
1463 		/* request sense for a request sense ? */
1464 		if (xs->xs_control & XS_CTL_REQSENSE) {
1465 			scsipi_printaddr(periph);
1466 			printf("request sense for a request sense ?\n");
1467 			/* XXX maybe we should reset the device ? */
1468 			/* we've been frozen because xs->error != XS_NOERROR */
1469 			scsipi_periph_thaw(periph, 1);
1470 			splx(s);
1471 			if (xs->resid < xs->datalen) {
1472 				printf("we read %d bytes of sense anyway:\n",
1473 				    xs->datalen - xs->resid);
1474 #ifdef SCSIVERBOSE
1475 				scsipi_print_sense_data((void *)xs->data, 0);
1476 #endif
1477 			}
1478 			return EINVAL;
1479 		}
1480 		scsipi_request_sense(xs);
1481 	}
1482 	splx(s);
1483 
1484 	/*
1485 	 * If it's a user level request, bypass all usual completion
1486 	 * processing, let the user work it out..
1487 	 */
1488 	if ((xs->xs_control & XS_CTL_USERCMD) != 0) {
1489 		SC_DEBUG(periph, SCSIPI_DB3, ("calling user done()\n"));
1490 		if (xs->error != XS_NOERROR)
1491 			scsipi_periph_thaw(periph, 1);
1492 		scsipi_user_done(xs);
1493 		SC_DEBUG(periph, SCSIPI_DB3, ("returned from user done()\n "));
1494 		return 0;
1495 	}
1496 
1497 	switch (xs->error) {
1498 	case XS_NOERROR:
1499 		error = 0;
1500 		break;
1501 
1502 	case XS_SENSE:
1503 	case XS_SHORTSENSE:
1504 		error = (*chan->chan_bustype->bustype_interpret_sense)(xs);
1505 		break;
1506 
1507 	case XS_RESOURCE_SHORTAGE:
1508 		/*
1509 		 * XXX Should freeze channel's queue.
1510 		 */
1511 		scsipi_printaddr(periph);
1512 		printf("adapter resource shortage\n");
1513 		/* FALLTHROUGH */
1514 
1515 	case XS_BUSY:
1516 		if (xs->error == XS_BUSY && xs->status == SCSI_QUEUE_FULL) {
1517 			struct scsipi_max_openings mo;
1518 
1519 			/*
1520 			 * We set the openings to active - 1, assuming that
1521 			 * the command that got us here is the first one that
1522 			 * can't fit into the device's queue.  If that's not
1523 			 * the case, I guess we'll find out soon enough.
1524 			 */
1525 			mo.mo_target = periph->periph_target;
1526 			mo.mo_lun = periph->periph_lun;
1527 			if (periph->periph_active < periph->periph_openings)
1528 				mo.mo_openings = periph->periph_active - 1;
1529 			else
1530 				mo.mo_openings = periph->periph_openings - 1;
1531 #ifdef DIAGNOSTIC
1532 			if (mo.mo_openings < 0) {
1533 				scsipi_printaddr(periph);
1534 				printf("QUEUE FULL resulted in < 0 openings\n");
1535 				panic("scsipi_done");
1536 			}
1537 #endif
1538 			if (mo.mo_openings == 0) {
1539 				scsipi_printaddr(periph);
1540 				printf("QUEUE FULL resulted in 0 openings\n");
1541 				mo.mo_openings = 1;
1542 			}
1543 			scsipi_async_event(chan, ASYNC_EVENT_MAX_OPENINGS, &mo);
1544 			error = ERESTART;
1545 		} else if (xs->xs_retries != 0) {
1546 			xs->xs_retries--;
1547 			/*
1548 			 * Wait one second, and try again.
1549 			 */
1550 			if ((xs->xs_control & XS_CTL_POLL) ||
1551 			    (chan->chan_flags & SCSIPI_CHAN_TACTIVE) == 0) {
1552 				delay(1000000);
1553 			} else if (!callout_pending(&periph->periph_callout)) {
1554 				scsipi_periph_freeze(periph, 1);
1555 				callout_reset(&periph->periph_callout,
1556 				    hz, scsipi_periph_timed_thaw, periph);
1557 			}
1558 			error = ERESTART;
1559 		} else
1560 			error = EBUSY;
1561 		break;
1562 
1563 	case XS_REQUEUE:
1564 		error = ERESTART;
1565 		break;
1566 
1567 	case XS_SELTIMEOUT:
1568 	case XS_TIMEOUT:
1569 		/*
1570 		 * If the device hasn't gone away, honor retry counts.
1571 		 *
1572 		 * Note that if we're in the middle of probing it,
1573 		 * it won't be found because it isn't here yet so
1574 		 * we won't honor the retry count in that case.
1575 		 */
1576 		if (scsipi_lookup_periph(chan, periph->periph_target,
1577 		    periph->periph_lun) && xs->xs_retries != 0) {
1578 			xs->xs_retries--;
1579 			error = ERESTART;
1580 		} else
1581 			error = EIO;
1582 		break;
1583 
1584 	case XS_RESET:
1585 		if (xs->xs_control & XS_CTL_REQSENSE) {
1586 			/*
1587 			 * request sense interrupted by reset: signal it
1588 			 * with EINTR return code.
1589 			 */
1590 			error = EINTR;
1591 		} else {
1592 			if (xs->xs_retries != 0) {
1593 				xs->xs_retries--;
1594 				error = ERESTART;
1595 			} else
1596 				error = EIO;
1597 		}
1598 		break;
1599 
1600 	case XS_DRIVER_STUFFUP:
1601 		scsipi_printaddr(periph);
1602 		printf("generic HBA error\n");
1603 		error = EIO;
1604 		break;
1605 	default:
1606 		scsipi_printaddr(periph);
1607 		printf("invalid return code from adapter: %d\n", xs->error);
1608 		error = EIO;
1609 		break;
1610 	}
1611 
1612 	s = splbio();
1613 	if (error == ERESTART) {
1614 		/*
1615 		 * If we get here, the periph has been thawed and frozen
1616 		 * again if we had to issue recovery commands.  Alternatively,
1617 		 * it may have been frozen again and in a timed thaw.  In
1618 		 * any case, we thaw the periph once we re-enqueue the
1619 		 * command.  Once the periph is fully thawed, it will begin
1620 		 * operation again.
1621 		 */
1622 		xs->error = XS_NOERROR;
1623 		xs->status = SCSI_OK;
1624 		xs->xs_status &= ~XS_STS_DONE;
1625 		xs->xs_requeuecnt++;
1626 		error = scsipi_enqueue(xs);
1627 		if (error == 0) {
1628 			scsipi_periph_thaw(periph, 1);
1629 			splx(s);
1630 			return (ERESTART);
1631 		}
1632 	}
1633 
1634 	/*
1635 	 * scsipi_done() freezes the queue if not XS_NOERROR.
1636 	 * Thaw it here.
1637 	 */
1638 	if (xs->error != XS_NOERROR)
1639 		scsipi_periph_thaw(periph, 1);
1640 
1641 	if (periph->periph_switch->psw_done)
1642 		periph->periph_switch->psw_done(xs, error);
1643 
1644 	if (xs->xs_control & XS_CTL_ASYNC)
1645 		scsipi_put_xs(xs);
1646 	splx(s);
1647 
1648 	return (error);
1649 }
1650 
1651 /*
1652  * Issue a request sense for the given scsipi_xfer. Called when the xfer
1653  * returns with a CHECK_CONDITION status. Must be called in valid thread
1654  * context and at splbio().
1655  */
1656 
1657 static void
1658 scsipi_request_sense(struct scsipi_xfer *xs)
1659 {
1660 	struct scsipi_periph *periph = xs->xs_periph;
1661 	int flags, error;
1662 	struct scsi_request_sense cmd;
1663 
1664 	periph->periph_flags |= PERIPH_SENSE;
1665 
1666 	/* if command was polling, request sense will too */
1667 	flags = xs->xs_control & XS_CTL_POLL;
1668 	/* Polling commands can't sleep */
1669 	if (flags)
1670 		flags |= XS_CTL_NOSLEEP;
1671 
1672 	flags |= XS_CTL_REQSENSE | XS_CTL_URGENT | XS_CTL_DATA_IN |
1673 	    XS_CTL_THAW_PERIPH | XS_CTL_FREEZE_PERIPH;
1674 
1675 	memset(&cmd, 0, sizeof(cmd));
1676 	cmd.opcode = SCSI_REQUEST_SENSE;
1677 	cmd.length = sizeof(struct scsi_sense_data);
1678 
1679 	error = scsipi_command(periph, (void *)&cmd, sizeof(cmd),
1680 	    (void *)&xs->sense.scsi_sense, sizeof(struct scsi_sense_data),
1681 	    0, 1000, NULL, flags);
1682 	periph->periph_flags &= ~PERIPH_SENSE;
1683 	periph->periph_xscheck = NULL;
1684 	switch (error) {
1685 	case 0:
1686 		/* we have a valid sense */
1687 		xs->error = XS_SENSE;
1688 		return;
1689 	case EINTR:
1690 		/* REQUEST_SENSE interrupted by bus reset. */
1691 		xs->error = XS_RESET;
1692 		return;
1693 	case EIO:
1694 		 /* request sense coudn't be performed */
1695 		/*
1696 		 * XXX this isn't quite right but we don't have anything
1697 		 * better for now
1698 		 */
1699 		xs->error = XS_DRIVER_STUFFUP;
1700 		return;
1701 	default:
1702 		 /* Notify that request sense failed. */
1703 		xs->error = XS_DRIVER_STUFFUP;
1704 		scsipi_printaddr(periph);
1705 		printf("request sense failed with error %d\n", error);
1706 		return;
1707 	}
1708 }
1709 
1710 /*
1711  * scsipi_enqueue:
1712  *
1713  *	Enqueue an xfer on a channel.
1714  */
1715 static int
1716 scsipi_enqueue(struct scsipi_xfer *xs)
1717 {
1718 	struct scsipi_channel *chan = xs->xs_periph->periph_channel;
1719 	struct scsipi_xfer *qxs;
1720 	int s;
1721 
1722 	s = splbio();
1723 
1724 	/*
1725 	 * If the xfer is to be polled, and there are already jobs on
1726 	 * the queue, we can't proceed.
1727 	 */
1728 	if ((xs->xs_control & XS_CTL_POLL) != 0 &&
1729 	    TAILQ_FIRST(&chan->chan_queue) != NULL) {
1730 		splx(s);
1731 		xs->error = XS_DRIVER_STUFFUP;
1732 		return (EAGAIN);
1733 	}
1734 
1735 	/*
1736 	 * If we have an URGENT xfer, it's an error recovery command
1737 	 * and it should just go on the head of the channel's queue.
1738 	 */
1739 	if (xs->xs_control & XS_CTL_URGENT) {
1740 		TAILQ_INSERT_HEAD(&chan->chan_queue, xs, channel_q);
1741 		goto out;
1742 	}
1743 
1744 	/*
1745 	 * If this xfer has already been on the queue before, we
1746 	 * need to reinsert it in the correct order.  That order is:
1747 	 *
1748 	 *	Immediately before the first xfer for this periph
1749 	 *	with a requeuecnt less than xs->xs_requeuecnt.
1750 	 *
1751 	 * Failing that, at the end of the queue.  (We'll end up
1752 	 * there naturally.)
1753 	 */
1754 	if (xs->xs_requeuecnt != 0) {
1755 		for (qxs = TAILQ_FIRST(&chan->chan_queue); qxs != NULL;
1756 		     qxs = TAILQ_NEXT(qxs, channel_q)) {
1757 			if (qxs->xs_periph == xs->xs_periph &&
1758 			    qxs->xs_requeuecnt < xs->xs_requeuecnt)
1759 				break;
1760 		}
1761 		if (qxs != NULL) {
1762 			TAILQ_INSERT_AFTER(&chan->chan_queue, qxs, xs,
1763 			    channel_q);
1764 			goto out;
1765 		}
1766 	}
1767 	TAILQ_INSERT_TAIL(&chan->chan_queue, xs, channel_q);
1768  out:
1769 	if (xs->xs_control & XS_CTL_THAW_PERIPH)
1770 		scsipi_periph_thaw(xs->xs_periph, 1);
1771 	splx(s);
1772 	return (0);
1773 }
1774 
1775 /*
1776  * scsipi_run_queue:
1777  *
1778  *	Start as many xfers as possible running on the channel.
1779  */
1780 static void
1781 scsipi_run_queue(struct scsipi_channel *chan)
1782 {
1783 	struct scsipi_xfer *xs;
1784 	struct scsipi_periph *periph;
1785 	int s;
1786 
1787 	for (;;) {
1788 		s = splbio();
1789 
1790 		/*
1791 		 * If the channel is frozen, we can't do any work right
1792 		 * now.
1793 		 */
1794 		if (chan->chan_qfreeze != 0) {
1795 			splx(s);
1796 			return;
1797 		}
1798 
1799 		/*
1800 		 * Look for work to do, and make sure we can do it.
1801 		 */
1802 		for (xs = TAILQ_FIRST(&chan->chan_queue); xs != NULL;
1803 		     xs = TAILQ_NEXT(xs, channel_q)) {
1804 			periph = xs->xs_periph;
1805 
1806 			if ((periph->periph_sent >= periph->periph_openings) ||
1807 			    periph->periph_qfreeze != 0 ||
1808 			    (periph->periph_flags & PERIPH_UNTAG) != 0)
1809 				continue;
1810 
1811 			if ((periph->periph_flags &
1812 			    (PERIPH_RECOVERING | PERIPH_SENSE)) != 0 &&
1813 			    (xs->xs_control & XS_CTL_URGENT) == 0)
1814 				continue;
1815 
1816 			/*
1817 			 * We can issue this xfer!
1818 			 */
1819 			goto got_one;
1820 		}
1821 
1822 		/*
1823 		 * Can't find any work to do right now.
1824 		 */
1825 		splx(s);
1826 		return;
1827 
1828  got_one:
1829 		/*
1830 		 * Have an xfer to run.  Allocate a resource from
1831 		 * the adapter to run it.  If we can't allocate that
1832 		 * resource, we don't dequeue the xfer.
1833 		 */
1834 		if (scsipi_get_resource(chan) == 0) {
1835 			/*
1836 			 * Adapter is out of resources.  If the adapter
1837 			 * supports it, attempt to grow them.
1838 			 */
1839 			if (scsipi_grow_resources(chan) == 0) {
1840 				/*
1841 				 * Wasn't able to grow resources,
1842 				 * nothing more we can do.
1843 				 */
1844 				if (xs->xs_control & XS_CTL_POLL) {
1845 					scsipi_printaddr(xs->xs_periph);
1846 					printf("polling command but no "
1847 					    "adapter resources");
1848 					/* We'll panic shortly... */
1849 				}
1850 				splx(s);
1851 
1852 				/*
1853 				 * XXX: We should be able to note that
1854 				 * XXX: that resources are needed here!
1855 				 */
1856 				return;
1857 			}
1858 			/*
1859 			 * scsipi_grow_resources() allocated the resource
1860 			 * for us.
1861 			 */
1862 		}
1863 
1864 		/*
1865 		 * We have a resource to run this xfer, do it!
1866 		 */
1867 		TAILQ_REMOVE(&chan->chan_queue, xs, channel_q);
1868 
1869 		/*
1870 		 * If the command is to be tagged, allocate a tag ID
1871 		 * for it.
1872 		 */
1873 		if (XS_CTL_TAGTYPE(xs) != 0)
1874 			scsipi_get_tag(xs);
1875 		else
1876 			periph->periph_flags |= PERIPH_UNTAG;
1877 		periph->periph_sent++;
1878 		splx(s);
1879 
1880 		scsipi_adapter_request(chan, ADAPTER_REQ_RUN_XFER, xs);
1881 	}
1882 #ifdef DIAGNOSTIC
1883 	panic("scsipi_run_queue: impossible");
1884 #endif
1885 }
1886 
1887 /*
1888  * scsipi_execute_xs:
1889  *
1890  *	Begin execution of an xfer, waiting for it to complete, if necessary.
1891  */
1892 int
1893 scsipi_execute_xs(struct scsipi_xfer *xs)
1894 {
1895 	struct scsipi_periph *periph = xs->xs_periph;
1896 	struct scsipi_channel *chan = periph->periph_channel;
1897 	int oasync, async, poll, error, s;
1898 
1899 	KASSERT(!cold);
1900 
1901 	(chan->chan_bustype->bustype_cmd)(xs);
1902 
1903 	if (xs->xs_control & XS_CTL_DATA_ONSTACK) {
1904 #if 1
1905 		if (xs->xs_control & XS_CTL_ASYNC)
1906 			panic("scsipi_execute_xs: on stack and async");
1907 #endif
1908 		/*
1909 		 * If the I/O buffer is allocated on stack, the
1910 		 * process must NOT be swapped out, as the device will
1911 		 * be accessing the stack.
1912 		 */
1913 		PHOLD(curlwp);
1914 	}
1915 
1916 	xs->xs_status &= ~XS_STS_DONE;
1917 	xs->error = XS_NOERROR;
1918 	xs->resid = xs->datalen;
1919 	xs->status = SCSI_OK;
1920 
1921 #ifdef SCSIPI_DEBUG
1922 	if (xs->xs_periph->periph_dbflags & SCSIPI_DB3) {
1923 		printf("scsipi_execute_xs: ");
1924 		show_scsipi_xs(xs);
1925 		printf("\n");
1926 	}
1927 #endif
1928 
1929 	/*
1930 	 * Deal with command tagging:
1931 	 *
1932 	 *	- If the device's current operating mode doesn't
1933 	 *	  include tagged queueing, clear the tag mask.
1934 	 *
1935 	 *	- If the device's current operating mode *does*
1936 	 *	  include tagged queueing, set the tag_type in
1937 	 *	  the xfer to the appropriate byte for the tag
1938 	 *	  message.
1939 	 */
1940 	if ((PERIPH_XFER_MODE(periph) & PERIPH_CAP_TQING) == 0 ||
1941 		(xs->xs_control & XS_CTL_REQSENSE)) {
1942 		xs->xs_control &= ~XS_CTL_TAGMASK;
1943 		xs->xs_tag_type = 0;
1944 	} else {
1945 		/*
1946 		 * If the request doesn't specify a tag, give Head
1947 		 * tags to URGENT operations and Ordered tags to
1948 		 * everything else.
1949 		 */
1950 		if (XS_CTL_TAGTYPE(xs) == 0) {
1951 			if (xs->xs_control & XS_CTL_URGENT)
1952 				xs->xs_control |= XS_CTL_HEAD_TAG;
1953 			else
1954 				xs->xs_control |= XS_CTL_ORDERED_TAG;
1955 		}
1956 
1957 		switch (XS_CTL_TAGTYPE(xs)) {
1958 		case XS_CTL_ORDERED_TAG:
1959 			xs->xs_tag_type = MSG_ORDERED_Q_TAG;
1960 			break;
1961 
1962 		case XS_CTL_SIMPLE_TAG:
1963 			xs->xs_tag_type = MSG_SIMPLE_Q_TAG;
1964 			break;
1965 
1966 		case XS_CTL_HEAD_TAG:
1967 			xs->xs_tag_type = MSG_HEAD_OF_Q_TAG;
1968 			break;
1969 
1970 		default:
1971 			scsipi_printaddr(periph);
1972 			printf("invalid tag mask 0x%08x\n",
1973 			    XS_CTL_TAGTYPE(xs));
1974 			panic("scsipi_execute_xs");
1975 		}
1976 	}
1977 
1978 	/* If the adaptor wants us to poll, poll. */
1979 	if (chan->chan_adapter->adapt_flags & SCSIPI_ADAPT_POLL_ONLY)
1980 		xs->xs_control |= XS_CTL_POLL;
1981 
1982 	/*
1983 	 * If we don't yet have a completion thread, or we are to poll for
1984 	 * completion, clear the ASYNC flag.
1985 	 */
1986 	oasync =  (xs->xs_control & XS_CTL_ASYNC);
1987 	if (chan->chan_thread == NULL || (xs->xs_control & XS_CTL_POLL) != 0)
1988 		xs->xs_control &= ~XS_CTL_ASYNC;
1989 
1990 	async = (xs->xs_control & XS_CTL_ASYNC);
1991 	poll = (xs->xs_control & XS_CTL_POLL);
1992 
1993 #ifdef DIAGNOSTIC
1994 	if (oasync != 0 && xs->bp == NULL)
1995 		panic("scsipi_execute_xs: XS_CTL_ASYNC but no buf");
1996 #endif
1997 
1998 	/*
1999 	 * Enqueue the transfer.  If we're not polling for completion, this
2000 	 * should ALWAYS return `no error'.
2001 	 */
2002 	error = scsipi_enqueue(xs);
2003 	if (error) {
2004 		if (poll == 0) {
2005 			scsipi_printaddr(periph);
2006 			printf("not polling, but enqueue failed with %d\n",
2007 			    error);
2008 			panic("scsipi_execute_xs");
2009 		}
2010 
2011 		scsipi_printaddr(periph);
2012 		printf("should have flushed queue?\n");
2013 		goto free_xs;
2014 	}
2015 
2016  restarted:
2017 	scsipi_run_queue(chan);
2018 
2019 	/*
2020 	 * The xfer is enqueued, and possibly running.  If it's to be
2021 	 * completed asynchronously, just return now.
2022 	 */
2023 	if (async)
2024 		return (0);
2025 
2026 	/*
2027 	 * Not an asynchronous command; wait for it to complete.
2028 	 */
2029 	s = splbio();
2030 	while ((xs->xs_status & XS_STS_DONE) == 0) {
2031 		if (poll) {
2032 			scsipi_printaddr(periph);
2033 			printf("polling command not done\n");
2034 			panic("scsipi_execute_xs");
2035 		}
2036 		(void) tsleep(xs, PRIBIO, "xscmd", 0);
2037 	}
2038 	splx(s);
2039 
2040 	/*
2041 	 * Command is complete.  scsipi_done() has awakened us to perform
2042 	 * the error handling.
2043 	 */
2044 	error = scsipi_complete(xs);
2045 	if (error == ERESTART)
2046 		goto restarted;
2047 
2048 	/*
2049 	 * If it was meant to run async and we cleared aync ourselve,
2050 	 * don't return an error here. It has already been handled
2051 	 */
2052 	if (oasync)
2053 		error = 0;
2054 	/*
2055 	 * Command completed successfully or fatal error occurred.  Fall
2056 	 * into....
2057 	 */
2058  free_xs:
2059 	if (xs->xs_control & XS_CTL_DATA_ONSTACK)
2060 		PRELE(curlwp);
2061 
2062 	s = splbio();
2063 	scsipi_put_xs(xs);
2064 	splx(s);
2065 
2066 	/*
2067 	 * Kick the queue, keep it running in case it stopped for some
2068 	 * reason.
2069 	 */
2070 	scsipi_run_queue(chan);
2071 
2072 	return (error);
2073 }
2074 
2075 /*
2076  * scsipi_completion_thread:
2077  *
2078  *	This is the completion thread.  We wait for errors on
2079  *	asynchronous xfers, and perform the error handling
2080  *	function, restarting the command, if necessary.
2081  */
2082 static void
2083 scsipi_completion_thread(void *arg)
2084 {
2085 	struct scsipi_channel *chan = arg;
2086 	struct scsipi_xfer *xs;
2087 	int s;
2088 
2089 	if (chan->chan_init_cb)
2090 		(*chan->chan_init_cb)(chan, chan->chan_init_cb_arg);
2091 
2092 	s = splbio();
2093 	chan->chan_flags |= SCSIPI_CHAN_TACTIVE;
2094 	splx(s);
2095 	for (;;) {
2096 		s = splbio();
2097 		xs = TAILQ_FIRST(&chan->chan_complete);
2098 		if (xs == NULL && chan->chan_tflags  == 0) {
2099 			/* nothing to do; wait */
2100 			(void) tsleep(&chan->chan_complete, PRIBIO,
2101 			    "sccomp", 0);
2102 			splx(s);
2103 			continue;
2104 		}
2105 		if (chan->chan_tflags & SCSIPI_CHANT_CALLBACK) {
2106 			/* call chan_callback from thread context */
2107 			chan->chan_tflags &= ~SCSIPI_CHANT_CALLBACK;
2108 			chan->chan_callback(chan, chan->chan_callback_arg);
2109 			splx(s);
2110 			continue;
2111 		}
2112 		if (chan->chan_tflags & SCSIPI_CHANT_GROWRES) {
2113 			/* attempt to get more openings for this channel */
2114 			chan->chan_tflags &= ~SCSIPI_CHANT_GROWRES;
2115 			scsipi_adapter_request(chan,
2116 			    ADAPTER_REQ_GROW_RESOURCES, NULL);
2117 			scsipi_channel_thaw(chan, 1);
2118 			splx(s);
2119 			if (chan->chan_tflags & SCSIPI_CHANT_GROWRES) {
2120 				preempt(1);
2121 			}
2122 			continue;
2123 		}
2124 		if (chan->chan_tflags & SCSIPI_CHANT_KICK) {
2125 			/* explicitly run the queues for this channel */
2126 			chan->chan_tflags &= ~SCSIPI_CHANT_KICK;
2127 			scsipi_run_queue(chan);
2128 			splx(s);
2129 			continue;
2130 		}
2131 		if (chan->chan_tflags & SCSIPI_CHANT_SHUTDOWN) {
2132 			splx(s);
2133 			break;
2134 		}
2135 		if (xs) {
2136 			TAILQ_REMOVE(&chan->chan_complete, xs, channel_q);
2137 			splx(s);
2138 
2139 			/*
2140 			 * Have an xfer with an error; process it.
2141 			 */
2142 			(void) scsipi_complete(xs);
2143 
2144 			/*
2145 			 * Kick the queue; keep it running if it was stopped
2146 			 * for some reason.
2147 			 */
2148 			scsipi_run_queue(chan);
2149 		} else {
2150 			splx(s);
2151 		}
2152 	}
2153 
2154 	chan->chan_thread = NULL;
2155 
2156 	/* In case parent is waiting for us to exit. */
2157 	wakeup(&chan->chan_thread);
2158 
2159 	kthread_exit(0);
2160 }
2161 
2162 /*
2163  * scsipi_create_completion_thread:
2164  *
2165  *	Callback to actually create the completion thread.
2166  */
2167 void
2168 scsipi_create_completion_thread(void *arg)
2169 {
2170 	struct scsipi_channel *chan = arg;
2171 	struct scsipi_adapter *adapt = chan->chan_adapter;
2172 
2173 	if (kthread_create1(scsipi_completion_thread, chan,
2174 	    &chan->chan_thread, "%s", chan->chan_name)) {
2175 		printf("%s: unable to create completion thread for "
2176 		    "channel %d\n", adapt->adapt_dev->dv_xname,
2177 		    chan->chan_channel);
2178 		panic("scsipi_create_completion_thread");
2179 	}
2180 }
2181 
2182 /*
2183  * scsipi_thread_call_callback:
2184  *
2185  * 	request to call a callback from the completion thread
2186  */
2187 int
2188 scsipi_thread_call_callback(struct scsipi_channel *chan,
2189     void (*callback)(struct scsipi_channel *, void *), void *arg)
2190 {
2191 	int s;
2192 
2193 	s = splbio();
2194 	if ((chan->chan_flags & SCSIPI_CHAN_TACTIVE) == 0) {
2195 		/* kernel thread doesn't exist yet */
2196 		splx(s);
2197 		return ESRCH;
2198 	}
2199 	if (chan->chan_tflags & SCSIPI_CHANT_CALLBACK) {
2200 		splx(s);
2201 		return EBUSY;
2202 	}
2203 	scsipi_channel_freeze(chan, 1);
2204 	chan->chan_callback = callback;
2205 	chan->chan_callback_arg = arg;
2206 	chan->chan_tflags |= SCSIPI_CHANT_CALLBACK;
2207 	wakeup(&chan->chan_complete);
2208 	splx(s);
2209 	return(0);
2210 }
2211 
2212 /*
2213  * scsipi_async_event:
2214  *
2215  *	Handle an asynchronous event from an adapter.
2216  */
2217 void
2218 scsipi_async_event(struct scsipi_channel *chan, scsipi_async_event_t event,
2219     void *arg)
2220 {
2221 	int s;
2222 
2223 	s = splbio();
2224 	switch (event) {
2225 	case ASYNC_EVENT_MAX_OPENINGS:
2226 		scsipi_async_event_max_openings(chan,
2227 		    (struct scsipi_max_openings *)arg);
2228 		break;
2229 
2230 	case ASYNC_EVENT_XFER_MODE:
2231 		scsipi_async_event_xfer_mode(chan,
2232 		    (struct scsipi_xfer_mode *)arg);
2233 		break;
2234 	case ASYNC_EVENT_RESET:
2235 		scsipi_async_event_channel_reset(chan);
2236 		break;
2237 	}
2238 	splx(s);
2239 }
2240 
2241 /*
2242  * scsipi_print_xfer_mode:
2243  *
2244  *	Print a periph's capabilities.
2245  */
2246 void
2247 scsipi_print_xfer_mode(struct scsipi_periph *periph)
2248 {
2249 	int period, freq, speed, mbs;
2250 
2251 	if ((periph->periph_flags & PERIPH_MODE_VALID) == 0)
2252 		return;
2253 
2254 	aprint_normal("%s: ", periph->periph_dev->dv_xname);
2255 	if (periph->periph_mode & (PERIPH_CAP_SYNC | PERIPH_CAP_DT)) {
2256 		period = scsipi_sync_factor_to_period(periph->periph_period);
2257 		aprint_normal("sync (%d.%02dns offset %d)",
2258 		    period / 100, period % 100, periph->periph_offset);
2259 	} else
2260 		aprint_normal("async");
2261 
2262 	if (periph->periph_mode & PERIPH_CAP_WIDE32)
2263 		aprint_normal(", 32-bit");
2264 	else if (periph->periph_mode & (PERIPH_CAP_WIDE16 | PERIPH_CAP_DT))
2265 		aprint_normal(", 16-bit");
2266 	else
2267 		aprint_normal(", 8-bit");
2268 
2269 	if (periph->periph_mode & (PERIPH_CAP_SYNC | PERIPH_CAP_DT)) {
2270 		freq = scsipi_sync_factor_to_freq(periph->periph_period);
2271 		speed = freq;
2272 		if (periph->periph_mode & PERIPH_CAP_WIDE32)
2273 			speed *= 4;
2274 		else if (periph->periph_mode &
2275 		    (PERIPH_CAP_WIDE16 | PERIPH_CAP_DT))
2276 			speed *= 2;
2277 		mbs = speed / 1000;
2278 		if (mbs > 0)
2279 			aprint_normal(" (%d.%03dMB/s)", mbs, speed % 1000);
2280 		else
2281 			aprint_normal(" (%dKB/s)", speed % 1000);
2282 	}
2283 
2284 	aprint_normal(" transfers");
2285 
2286 	if (periph->periph_mode & PERIPH_CAP_TQING)
2287 		aprint_normal(", tagged queueing");
2288 
2289 	aprint_normal("\n");
2290 }
2291 
2292 /*
2293  * scsipi_async_event_max_openings:
2294  *
2295  *	Update the maximum number of outstanding commands a
2296  *	device may have.
2297  */
2298 static void
2299 scsipi_async_event_max_openings(struct scsipi_channel *chan,
2300     struct scsipi_max_openings *mo)
2301 {
2302 	struct scsipi_periph *periph;
2303 	int minlun, maxlun;
2304 
2305 	if (mo->mo_lun == -1) {
2306 		/*
2307 		 * Wildcarded; apply it to all LUNs.
2308 		 */
2309 		minlun = 0;
2310 		maxlun = chan->chan_nluns - 1;
2311 	} else
2312 		minlun = maxlun = mo->mo_lun;
2313 
2314 	/* XXX This could really suck with a large LUN space. */
2315 	for (; minlun <= maxlun; minlun++) {
2316 		periph = scsipi_lookup_periph(chan, mo->mo_target, minlun);
2317 		if (periph == NULL)
2318 			continue;
2319 
2320 		if (mo->mo_openings < periph->periph_openings)
2321 			periph->periph_openings = mo->mo_openings;
2322 		else if (mo->mo_openings > periph->periph_openings &&
2323 		    (periph->periph_flags & PERIPH_GROW_OPENINGS) != 0)
2324 			periph->periph_openings = mo->mo_openings;
2325 	}
2326 }
2327 
2328 /*
2329  * scsipi_async_event_xfer_mode:
2330  *
2331  *	Update the xfer mode for all periphs sharing the
2332  *	specified I_T Nexus.
2333  */
2334 static void
2335 scsipi_async_event_xfer_mode(struct scsipi_channel *chan,
2336     struct scsipi_xfer_mode *xm)
2337 {
2338 	struct scsipi_periph *periph;
2339 	int lun, announce, mode, period, offset;
2340 
2341 	for (lun = 0; lun < chan->chan_nluns; lun++) {
2342 		periph = scsipi_lookup_periph(chan, xm->xm_target, lun);
2343 		if (periph == NULL)
2344 			continue;
2345 		announce = 0;
2346 
2347 		/*
2348 		 * Clamp the xfer mode down to this periph's capabilities.
2349 		 */
2350 		mode = xm->xm_mode & periph->periph_cap;
2351 		if (mode & PERIPH_CAP_SYNC) {
2352 			period = xm->xm_period;
2353 			offset = xm->xm_offset;
2354 		} else {
2355 			period = 0;
2356 			offset = 0;
2357 		}
2358 
2359 		/*
2360 		 * If we do not have a valid xfer mode yet, or the parameters
2361 		 * are different, announce them.
2362 		 */
2363 		if ((periph->periph_flags & PERIPH_MODE_VALID) == 0 ||
2364 		    periph->periph_mode != mode ||
2365 		    periph->periph_period != period ||
2366 		    periph->periph_offset != offset)
2367 			announce = 1;
2368 
2369 		periph->periph_mode = mode;
2370 		periph->periph_period = period;
2371 		periph->periph_offset = offset;
2372 		periph->periph_flags |= PERIPH_MODE_VALID;
2373 
2374 		if (announce)
2375 			scsipi_print_xfer_mode(periph);
2376 	}
2377 }
2378 
2379 /*
2380  * scsipi_set_xfer_mode:
2381  *
2382  *	Set the xfer mode for the specified I_T Nexus.
2383  */
2384 void
2385 scsipi_set_xfer_mode(struct scsipi_channel *chan, int target, int immed)
2386 {
2387 	struct scsipi_xfer_mode xm;
2388 	struct scsipi_periph *itperiph;
2389 	int lun, s;
2390 
2391 	/*
2392 	 * Go to the minimal xfer mode.
2393 	 */
2394 	xm.xm_target = target;
2395 	xm.xm_mode = 0;
2396 	xm.xm_period = 0;			/* ignored */
2397 	xm.xm_offset = 0;			/* ignored */
2398 
2399 	/*
2400 	 * Find the first LUN we know about on this I_T Nexus.
2401 	 */
2402 	for (itperiph = NULL, lun = 0; lun < chan->chan_nluns; lun++) {
2403 		itperiph = scsipi_lookup_periph(chan, target, lun);
2404 		if (itperiph != NULL)
2405 			break;
2406 	}
2407 	if (itperiph != NULL) {
2408 		xm.xm_mode = itperiph->periph_cap;
2409 		/*
2410 		 * Now issue the request to the adapter.
2411 		 */
2412 		s = splbio();
2413 		scsipi_adapter_request(chan, ADAPTER_REQ_SET_XFER_MODE, &xm);
2414 		splx(s);
2415 		/*
2416 		 * If we want this to happen immediately, issue a dummy
2417 		 * command, since most adapters can't really negotiate unless
2418 		 * they're executing a job.
2419 		 */
2420 		if (immed != 0) {
2421 			(void) scsipi_test_unit_ready(itperiph,
2422 			    XS_CTL_DISCOVERY | XS_CTL_IGNORE_ILLEGAL_REQUEST |
2423 			    XS_CTL_IGNORE_NOT_READY |
2424 			    XS_CTL_IGNORE_MEDIA_CHANGE);
2425 		}
2426 	}
2427 }
2428 
2429 /*
2430  * scsipi_channel_reset:
2431  *
2432  *	handle scsi bus reset
2433  * called at splbio
2434  */
2435 static void
2436 scsipi_async_event_channel_reset(struct scsipi_channel *chan)
2437 {
2438 	struct scsipi_xfer *xs, *xs_next;
2439 	struct scsipi_periph *periph;
2440 	int target, lun;
2441 
2442 	/*
2443 	 * Channel has been reset. Also mark as reset pending REQUEST_SENSE
2444 	 * commands; as the sense is not available any more.
2445 	 * can't call scsipi_done() from here, as the command has not been
2446 	 * sent to the adapter yet (this would corrupt accounting).
2447 	 */
2448 
2449 	for (xs = TAILQ_FIRST(&chan->chan_queue); xs != NULL; xs = xs_next) {
2450 		xs_next = TAILQ_NEXT(xs, channel_q);
2451 		if (xs->xs_control & XS_CTL_REQSENSE) {
2452 			TAILQ_REMOVE(&chan->chan_queue, xs, channel_q);
2453 			xs->error = XS_RESET;
2454 			if ((xs->xs_control & XS_CTL_ASYNC) != 0)
2455 				TAILQ_INSERT_TAIL(&chan->chan_complete, xs,
2456 				    channel_q);
2457 		}
2458 	}
2459 	wakeup(&chan->chan_complete);
2460 	/* Catch xs with pending sense which may not have a REQSENSE xs yet */
2461 	for (target = 0; target < chan->chan_ntargets; target++) {
2462 		if (target == chan->chan_id)
2463 			continue;
2464 		for (lun = 0; lun <  chan->chan_nluns; lun++) {
2465 			periph = scsipi_lookup_periph(chan, target, lun);
2466 			if (periph) {
2467 				xs = periph->periph_xscheck;
2468 				if (xs)
2469 					xs->error = XS_RESET;
2470 			}
2471 		}
2472 	}
2473 }
2474 
2475 /*
2476  * scsipi_target_detach:
2477  *
2478  *	detach all periph associated with a I_T
2479  * 	must be called from valid thread context
2480  */
2481 int
2482 scsipi_target_detach(struct scsipi_channel *chan, int target, int lun,
2483     int flags)
2484 {
2485 	struct scsipi_periph *periph;
2486 	int ctarget, mintarget, maxtarget;
2487 	int clun, minlun, maxlun;
2488 	int error;
2489 
2490 	if (target == -1) {
2491 		mintarget = 0;
2492 		maxtarget = chan->chan_ntargets;
2493 	} else {
2494 		if (target == chan->chan_id)
2495 			return EINVAL;
2496 		if (target < 0 || target >= chan->chan_ntargets)
2497 			return EINVAL;
2498 		mintarget = target;
2499 		maxtarget = target + 1;
2500 	}
2501 
2502 	if (lun == -1) {
2503 		minlun = 0;
2504 		maxlun = chan->chan_nluns;
2505 	} else {
2506 		if (lun < 0 || lun >= chan->chan_nluns)
2507 			return EINVAL;
2508 		minlun = lun;
2509 		maxlun = lun + 1;
2510 	}
2511 
2512 	for (ctarget = mintarget; ctarget < maxtarget; ctarget++) {
2513 		if (ctarget == chan->chan_id)
2514 			continue;
2515 
2516 		for (clun = minlun; clun < maxlun; clun++) {
2517 			periph = scsipi_lookup_periph(chan, ctarget, clun);
2518 			if (periph == NULL)
2519 				continue;
2520 			error = config_detach(periph->periph_dev, flags);
2521 			if (error)
2522 				return (error);
2523 		}
2524 	}
2525 	return(0);
2526 }
2527 
2528 /*
2529  * scsipi_adapter_addref:
2530  *
2531  *	Add a reference to the adapter pointed to by the provided
2532  *	link, enabling the adapter if necessary.
2533  */
2534 int
2535 scsipi_adapter_addref(struct scsipi_adapter *adapt)
2536 {
2537 	int s, error = 0;
2538 
2539 	s = splbio();
2540 	if (adapt->adapt_refcnt++ == 0 && adapt->adapt_enable != NULL) {
2541 		error = (*adapt->adapt_enable)(adapt->adapt_dev, 1);
2542 		if (error)
2543 			adapt->adapt_refcnt--;
2544 	}
2545 	splx(s);
2546 	return (error);
2547 }
2548 
2549 /*
2550  * scsipi_adapter_delref:
2551  *
2552  *	Delete a reference to the adapter pointed to by the provided
2553  *	link, disabling the adapter if possible.
2554  */
2555 void
2556 scsipi_adapter_delref(struct scsipi_adapter *adapt)
2557 {
2558 	int s;
2559 
2560 	s = splbio();
2561 	if (adapt->adapt_refcnt-- == 1 && adapt->adapt_enable != NULL)
2562 		(void) (*adapt->adapt_enable)(adapt->adapt_dev, 0);
2563 	splx(s);
2564 }
2565 
2566 static struct scsipi_syncparam {
2567 	int	ss_factor;
2568 	int	ss_period;	/* ns * 100 */
2569 } scsipi_syncparams[] = {
2570 	{ 0x08,		 625 },	/* FAST-160 (Ultra320) */
2571 	{ 0x09,		1250 },	/* FAST-80 (Ultra160) */
2572 	{ 0x0a,		2500 },	/* FAST-40 40MHz (Ultra2) */
2573 	{ 0x0b,		3030 },	/* FAST-40 33MHz (Ultra2) */
2574 	{ 0x0c,		5000 },	/* FAST-20 (Ultra) */
2575 };
2576 static const int scsipi_nsyncparams =
2577     sizeof(scsipi_syncparams) / sizeof(scsipi_syncparams[0]);
2578 
2579 int
2580 scsipi_sync_period_to_factor(int period /* ns * 100 */)
2581 {
2582 	int i;
2583 
2584 	for (i = 0; i < scsipi_nsyncparams; i++) {
2585 		if (period <= scsipi_syncparams[i].ss_period)
2586 			return (scsipi_syncparams[i].ss_factor);
2587 	}
2588 
2589 	return ((period / 100) / 4);
2590 }
2591 
2592 int
2593 scsipi_sync_factor_to_period(int factor)
2594 {
2595 	int i;
2596 
2597 	for (i = 0; i < scsipi_nsyncparams; i++) {
2598 		if (factor == scsipi_syncparams[i].ss_factor)
2599 			return (scsipi_syncparams[i].ss_period);
2600 	}
2601 
2602 	return ((factor * 4) * 100);
2603 }
2604 
2605 int
2606 scsipi_sync_factor_to_freq(int factor)
2607 {
2608 	int i;
2609 
2610 	for (i = 0; i < scsipi_nsyncparams; i++) {
2611 		if (factor == scsipi_syncparams[i].ss_factor)
2612 			return (100000000 / scsipi_syncparams[i].ss_period);
2613 	}
2614 
2615 	return (10000000 / ((factor * 4) * 10));
2616 }
2617 
2618 #ifdef SCSIPI_DEBUG
2619 /*
2620  * Given a scsipi_xfer, dump the request, in all it's glory
2621  */
2622 void
2623 show_scsipi_xs(struct scsipi_xfer *xs)
2624 {
2625 
2626 	printf("xs(%p): ", xs);
2627 	printf("xs_control(0x%08x)", xs->xs_control);
2628 	printf("xs_status(0x%08x)", xs->xs_status);
2629 	printf("periph(%p)", xs->xs_periph);
2630 	printf("retr(0x%x)", xs->xs_retries);
2631 	printf("timo(0x%x)", xs->timeout);
2632 	printf("cmd(%p)", xs->cmd);
2633 	printf("len(0x%x)", xs->cmdlen);
2634 	printf("data(%p)", xs->data);
2635 	printf("len(0x%x)", xs->datalen);
2636 	printf("res(0x%x)", xs->resid);
2637 	printf("err(0x%x)", xs->error);
2638 	printf("bp(%p)", xs->bp);
2639 	show_scsipi_cmd(xs);
2640 }
2641 
2642 void
2643 show_scsipi_cmd(struct scsipi_xfer *xs)
2644 {
2645 	u_char *b = (u_char *) xs->cmd;
2646 	int i = 0;
2647 
2648 	scsipi_printaddr(xs->xs_periph);
2649 	printf(" command: ");
2650 
2651 	if ((xs->xs_control & XS_CTL_RESET) == 0) {
2652 		while (i < xs->cmdlen) {
2653 			if (i)
2654 				printf(",");
2655 			printf("0x%x", b[i++]);
2656 		}
2657 		printf("-[%d bytes]\n", xs->datalen);
2658 		if (xs->datalen)
2659 			show_mem(xs->data, min(64, xs->datalen));
2660 	} else
2661 		printf("-RESET-\n");
2662 }
2663 
2664 void
2665 show_mem(u_char *address, int num)
2666 {
2667 	int x;
2668 
2669 	printf("------------------------------");
2670 	for (x = 0; x < num; x++) {
2671 		if ((x % 16) == 0)
2672 			printf("\n%03d: ", x);
2673 		printf("%02x ", *address++);
2674 	}
2675 	printf("\n------------------------------\n");
2676 }
2677 #endif /* SCSIPI_DEBUG */
2678