1 /*	$NetBSD: scsipi_base.c,v 1.185 2020/02/19 16:05:41 riastradh Exp $	*/
2 
3 /*-
4  * Copyright (c) 1998, 1999, 2000, 2002, 2003, 2004 The NetBSD Foundation, Inc.
5  * All rights reserved.
6  *
7  * This code is derived from software contributed to The NetBSD Foundation
8  * by Charles M. Hannum; by Jason R. Thorpe of the Numerical Aerospace
9  * Simulation Facility, NASA Ames Research Center.
10  *
11  * Redistribution and use in source and binary forms, with or without
12  * modification, are permitted provided that the following conditions
13  * are met:
14  * 1. Redistributions of source code must retain the above copyright
15  *    notice, this list of conditions and the following disclaimer.
16  * 2. Redistributions in binary form must reproduce the above copyright
17  *    notice, this list of conditions and the following disclaimer in the
18  *    documentation and/or other materials provided with the distribution.
19  *
20  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
21  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
22  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
23  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
24  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
30  * POSSIBILITY OF SUCH DAMAGE.
31  */
32 
33 #include <sys/cdefs.h>
34 __KERNEL_RCSID(0, "$NetBSD: scsipi_base.c,v 1.185 2020/02/19 16:05:41 riastradh Exp $");
35 
36 #ifdef _KERNEL_OPT
37 #include "opt_scsi.h"
38 #endif
39 
40 #include <sys/param.h>
41 #include <sys/systm.h>
42 #include <sys/kernel.h>
43 #include <sys/buf.h>
44 #include <sys/uio.h>
45 #include <sys/malloc.h>
46 #include <sys/pool.h>
47 #include <sys/errno.h>
48 #include <sys/device.h>
49 #include <sys/proc.h>
50 #include <sys/kthread.h>
51 #include <sys/hash.h>
52 #include <sys/atomic.h>
53 
54 #include <dev/scsipi/scsi_sdt.h>
55 #include <dev/scsipi/scsi_spc.h>
56 #include <dev/scsipi/scsipi_all.h>
57 #include <dev/scsipi/scsipi_disk.h>
58 #include <dev/scsipi/scsipiconf.h>
59 #include <dev/scsipi/scsipi_base.h>
60 
61 #include <dev/scsipi/scsi_all.h>
62 #include <dev/scsipi/scsi_message.h>
63 
64 #include <machine/param.h>
65 
66 SDT_PROVIDER_DEFINE(scsi);
67 
68 SDT_PROBE_DEFINE3(scsi, base, tag, get,
69     "struct scsipi_xfer *"/*xs*/, "uint8_t"/*tag*/, "uint8_t"/*type*/);
70 SDT_PROBE_DEFINE3(scsi, base, tag, put,
71     "struct scsipi_xfer *"/*xs*/, "uint8_t"/*tag*/, "uint8_t"/*type*/);
72 
73 SDT_PROBE_DEFINE3(scsi, base, adapter, request__start,
74     "struct scsipi_channel *"/*chan*/,
75     "scsipi_adapter_req_t"/*req*/,
76     "void *"/*arg*/);
77 SDT_PROBE_DEFINE3(scsi, base, adapter, request__done,
78     "struct scsipi_channel *"/*chan*/,
79     "scsipi_adapter_req_t"/*req*/,
80     "void *"/*arg*/);
81 
82 SDT_PROBE_DEFINE1(scsi, base, queue, batch__start,
83     "struct scsipi_channel *"/*chan*/);
84 SDT_PROBE_DEFINE2(scsi, base, queue, run,
85     "struct scsipi_channel *"/*chan*/,
86     "struct scsipi_xfer *"/*xs*/);
87 SDT_PROBE_DEFINE1(scsi, base, queue, batch__done,
88     "struct scsipi_channel *"/*chan*/);
89 
90 SDT_PROBE_DEFINE1(scsi, base, xfer, execute,  "struct scsipi_xfer *"/*xs*/);
91 SDT_PROBE_DEFINE1(scsi, base, xfer, enqueue,  "struct scsipi_xfer *"/*xs*/);
92 SDT_PROBE_DEFINE1(scsi, base, xfer, done,  "struct scsipi_xfer *"/*xs*/);
93 SDT_PROBE_DEFINE1(scsi, base, xfer, redone,  "struct scsipi_xfer *"/*xs*/);
94 SDT_PROBE_DEFINE1(scsi, base, xfer, complete,  "struct scsipi_xfer *"/*xs*/);
95 SDT_PROBE_DEFINE1(scsi, base, xfer, restart,  "struct scsipi_xfer *"/*xs*/);
96 SDT_PROBE_DEFINE1(scsi, base, xfer, free,  "struct scsipi_xfer *"/*xs*/);
97 
98 static int	scsipi_complete(struct scsipi_xfer *);
99 static void	scsipi_request_sense(struct scsipi_xfer *);
100 static int	scsipi_enqueue(struct scsipi_xfer *);
101 static void	scsipi_run_queue(struct scsipi_channel *chan);
102 
103 static void	scsipi_completion_thread(void *);
104 
105 static void	scsipi_get_tag(struct scsipi_xfer *);
106 static void	scsipi_put_tag(struct scsipi_xfer *);
107 
108 static int	scsipi_get_resource(struct scsipi_channel *);
109 static void	scsipi_put_resource(struct scsipi_channel *);
110 
111 static void	scsipi_async_event_max_openings(struct scsipi_channel *,
112 		    struct scsipi_max_openings *);
113 static void	scsipi_async_event_channel_reset(struct scsipi_channel *);
114 
115 static void	scsipi_channel_freeze_locked(struct scsipi_channel *, int);
116 
117 static void	scsipi_adapter_lock(struct scsipi_adapter *adapt);
118 static void	scsipi_adapter_unlock(struct scsipi_adapter *adapt);
119 
120 static void	scsipi_update_timeouts(struct scsipi_xfer *xs);
121 
122 static struct pool scsipi_xfer_pool;
123 
124 int scsipi_xs_count = 0;
125 
126 /*
127  * scsipi_init:
128  *
129  *	Called when a scsibus or atapibus is attached to the system
130  *	to initialize shared data structures.
131  */
132 void
133 scsipi_init(void)
134 {
135 	static int scsipi_init_done;
136 
137 	if (scsipi_init_done)
138 		return;
139 	scsipi_init_done = 1;
140 
141 	/* Initialize the scsipi_xfer pool. */
142 	pool_init(&scsipi_xfer_pool, sizeof(struct scsipi_xfer), 0,
143 	    0, 0, "scxspl", NULL, IPL_BIO);
144 	if (pool_prime(&scsipi_xfer_pool,
145 	    PAGE_SIZE / sizeof(struct scsipi_xfer)) == ENOMEM) {
146 		printf("WARNING: not enough memory for scsipi_xfer_pool\n");
147 	}
148 
149 	scsipi_ioctl_init();
150 }
151 
152 /*
153  * scsipi_channel_init:
154  *
155  *	Initialize a scsipi_channel when it is attached.
156  */
157 int
158 scsipi_channel_init(struct scsipi_channel *chan)
159 {
160 	struct scsipi_adapter *adapt = chan->chan_adapter;
161 	int i;
162 
163 	/* Initialize shared data. */
164 	scsipi_init();
165 
166 	/* Initialize the queues. */
167 	TAILQ_INIT(&chan->chan_queue);
168 	TAILQ_INIT(&chan->chan_complete);
169 
170 	for (i = 0; i < SCSIPI_CHAN_PERIPH_BUCKETS; i++)
171 		LIST_INIT(&chan->chan_periphtab[i]);
172 
173 	/*
174 	 * Create the asynchronous completion thread.
175 	 */
176 	if (kthread_create(PRI_NONE, 0, NULL, scsipi_completion_thread, chan,
177 	    &chan->chan_thread, "%s", chan->chan_name)) {
178 		aprint_error_dev(adapt->adapt_dev, "unable to create completion thread for "
179 		    "channel %d\n", chan->chan_channel);
180 		panic("scsipi_channel_init");
181 	}
182 
183 	return 0;
184 }
185 
186 /*
187  * scsipi_channel_shutdown:
188  *
189  *	Shutdown a scsipi_channel.
190  */
191 void
192 scsipi_channel_shutdown(struct scsipi_channel *chan)
193 {
194 
195 	mutex_enter(chan_mtx(chan));
196 	/*
197 	 * Shut down the completion thread.
198 	 */
199 	chan->chan_tflags |= SCSIPI_CHANT_SHUTDOWN;
200 	cv_broadcast(chan_cv_complete(chan));
201 
202 	/*
203 	 * Now wait for the thread to exit.
204 	 */
205 	while (chan->chan_thread != NULL)
206 		cv_wait(chan_cv_thread(chan), chan_mtx(chan));
207 	mutex_exit(chan_mtx(chan));
208 }
209 
210 static uint32_t
211 scsipi_chan_periph_hash(uint64_t t, uint64_t l)
212 {
213 	uint32_t hash;
214 
215 	hash = hash32_buf(&t, sizeof(t), HASH32_BUF_INIT);
216 	hash = hash32_buf(&l, sizeof(l), hash);
217 
218 	return hash & SCSIPI_CHAN_PERIPH_HASHMASK;
219 }
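
/*
 * Editor's sketch (illustrative only, not part of the driver): a periph
 * at target 2, LUN 0 lands in the bucket
 *
 *	hash = scsipi_chan_periph_hash(2, 0);
 *
 * and scsipi_insert_periph() below files it on
 * chan->chan_periphtab[hash], where scsipi_lookup_periph() will find
 * it again.
 */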
220 
221 /*
222  * scsipi_insert_periph:
223  *
224  *	Insert a periph into the channel.
225  */
226 void
227 scsipi_insert_periph(struct scsipi_channel *chan, struct scsipi_periph *periph)
228 {
229 	uint32_t hash;
230 
231 	hash = scsipi_chan_periph_hash(periph->periph_target,
232 	    periph->periph_lun);
233 
234 	mutex_enter(chan_mtx(chan));
235 	LIST_INSERT_HEAD(&chan->chan_periphtab[hash], periph, periph_hash);
236 	mutex_exit(chan_mtx(chan));
237 }
238 
239 /*
240  * scsipi_remove_periph:
241  *
242  *	Remove a periph from the channel.
243  */
244 void
245 scsipi_remove_periph(struct scsipi_channel *chan,
246     struct scsipi_periph *periph)
247 {
248 
249 	LIST_REMOVE(periph, periph_hash);
250 }
251 
252 /*
253  * scsipi_lookup_periph:
254  *
255  *	Lookup a periph on the specified channel.
256  */
257 static struct scsipi_periph *
258 scsipi_lookup_periph_internal(struct scsipi_channel *chan, int target, int lun, bool lock)
259 {
260 	struct scsipi_periph *periph;
261 	uint32_t hash;
262 
263 	if (target >= chan->chan_ntargets ||
264 	    lun >= chan->chan_nluns)
265 		return NULL;
266 
267 	hash = scsipi_chan_periph_hash(target, lun);
268 
269 	if (lock)
270 		mutex_enter(chan_mtx(chan));
271 	LIST_FOREACH(periph, &chan->chan_periphtab[hash], periph_hash) {
272 		if (periph->periph_target == target &&
273 		    periph->periph_lun == lun)
274 			break;
275 	}
276 	if (lock)
277 		mutex_exit(chan_mtx(chan));
278 
279 	return periph;
280 }
281 
282 struct scsipi_periph *
283 scsipi_lookup_periph_locked(struct scsipi_channel *chan, int target, int lun)
284 {
285 	return scsipi_lookup_periph_internal(chan, target, lun, false);
286 }
287 
288 struct scsipi_periph *
289 scsipi_lookup_periph(struct scsipi_channel *chan, int target, int lun)
290 {
291 	return scsipi_lookup_periph_internal(chan, target, lun, true);
292 }
293 
294 /*
295  * scsipi_get_resource:
296  *
297  *	Allocate a single xfer `resource' from the channel.
298  *
299  *	NOTE: Must be called with channel lock held
300  */
301 static int
302 scsipi_get_resource(struct scsipi_channel *chan)
303 {
304 	struct scsipi_adapter *adapt = chan->chan_adapter;
305 
306 	if (chan->chan_flags & SCSIPI_CHAN_OPENINGS) {
307 		if (chan->chan_openings > 0) {
308 			chan->chan_openings--;
309 			return 1;
310 		}
311 		return 0;
312 	}
313 
314 	if (adapt->adapt_openings > 0) {
315 		adapt->adapt_openings--;
316 		return 1;
317 	}
318 	return 0;
319 }
320 
321 /*
322  * scsipi_grow_resources:
323  *
324  *	Attempt to grow resources for a channel.  If this succeeds,
325  *	we allocate one for our caller.
326  *
327  *	NOTE: Must be called with channel lock held
328  */
329 static inline int
330 scsipi_grow_resources(struct scsipi_channel *chan)
331 {
332 
333 	if (chan->chan_flags & SCSIPI_CHAN_CANGROW) {
334 		if ((chan->chan_flags & SCSIPI_CHAN_TACTIVE) == 0) {
335 			mutex_exit(chan_mtx(chan));
336 			scsipi_adapter_request(chan,
337 			    ADAPTER_REQ_GROW_RESOURCES, NULL);
338 			mutex_enter(chan_mtx(chan));
339 			return scsipi_get_resource(chan);
340 		}
341 		/*
342 		 * Ask the channel thread to do it. It'll have to thaw the
343 		 * queue.
344 		 */
345 		scsipi_channel_freeze_locked(chan, 1);
346 		chan->chan_tflags |= SCSIPI_CHANT_GROWRES;
347 		cv_broadcast(chan_cv_complete(chan));
348 		return 0;
349 	}
350 
351 	return 0;
352 }
353 
354 /*
355  * scsipi_put_resource:
356  *
357  *	Free a single xfer `resource' to the channel.
358  *
359  *	NOTE: Must be called with channel lock held
360  */
361 static void
362 scsipi_put_resource(struct scsipi_channel *chan)
363 {
364 	struct scsipi_adapter *adapt = chan->chan_adapter;
365 
366 	if (chan->chan_flags & SCSIPI_CHAN_OPENINGS)
367 		chan->chan_openings++;
368 	else
369 		adapt->adapt_openings++;
370 }
371 
372 /*
373  * scsipi_get_tag:
374  *
375  *	Get a tag ID for the specified xfer.
376  *
377  *	NOTE: Must be called with channel lock held
378  */
379 static void
380 scsipi_get_tag(struct scsipi_xfer *xs)
381 {
382 	struct scsipi_periph *periph = xs->xs_periph;
383 	int bit, tag;
384 	u_int word;
385 
386 	KASSERT(mutex_owned(chan_mtx(periph->periph_channel)));
387 
388 	bit = 0;	/* XXX gcc */
389 	for (word = 0; word < PERIPH_NTAGWORDS; word++) {
390 		bit = ffs(periph->periph_freetags[word]);
391 		if (bit != 0)
392 			break;
393 	}
394 #ifdef DIAGNOSTIC
395 	if (word == PERIPH_NTAGWORDS) {
396 		scsipi_printaddr(periph);
397 		printf("no free tags\n");
398 		panic("scsipi_get_tag");
399 	}
400 #endif
401 
402 	bit -= 1;
403 	periph->periph_freetags[word] &= ~(1U << bit);
404 	tag = (word << 5) | bit;
405 
406 	/* XXX Should eventually disallow this completely. */
407 	if (tag >= periph->periph_openings) {
408 		scsipi_printaddr(periph);
409 		printf("WARNING: tag %d greater than available openings %d\n",
410 		    tag, periph->periph_openings);
411 	}
412 
413 	xs->xs_tag_id = tag;
414 	SDT_PROBE3(scsi, base, tag, get,
415 	    xs, xs->xs_tag_id, xs->xs_tag_type);
416 }
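
/*
 * Editor's worked example: with periph_freetags[0] == 0 and
 * periph_freetags[1] == 0x00000008, the scan above stops at word 1
 * with ffs() == 4, so bit == 3 and the allocated tag is
 * (1 << 5) | 3 == 35.  scsipi_put_tag() below inverts the mapping
 * with word = tag >> 5 and bit = tag & 0x1f.
 */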
417 
418 /*
419  * scsipi_put_tag:
420  *
421  *	Put the tag ID for the specified xfer back into the pool.
422  *
423  *	NOTE: Must be called with channel lock held
424  */
425 static void
426 scsipi_put_tag(struct scsipi_xfer *xs)
427 {
428 	struct scsipi_periph *periph = xs->xs_periph;
429 	int word, bit;
430 
431 	KASSERT(mutex_owned(chan_mtx(periph->periph_channel)));
432 
433 	SDT_PROBE3(scsi, base, tag, put,
434 	    xs, xs->xs_tag_id, xs->xs_tag_type);
435 
436 	word = xs->xs_tag_id >> 5;
437 	bit = xs->xs_tag_id & 0x1f;
438 
439 	periph->periph_freetags[word] |= (1U << bit);
440 }
441 
442 /*
443  * scsipi_get_xs:
444  *
445  *	Allocate an xfer descriptor and associate it with the
446  *	specified peripheral.  If the peripheral has no more
447  *	available command openings, we either block waiting for
448  *	one to become available, or fail.
449  *
450  *	When this routine is called with the channel lock held
451  *	the flags must include XS_CTL_NOSLEEP.
452  */
453 struct scsipi_xfer *
454 scsipi_get_xs(struct scsipi_periph *periph, int flags)
455 {
456 	struct scsipi_xfer *xs;
457 	bool lock = (flags & XS_CTL_NOSLEEP) == 0;
458 
459 	SC_DEBUG(periph, SCSIPI_DB3, ("scsipi_get_xs\n"));
460 
461 	KASSERT(!cold);
462 
463 #ifdef DIAGNOSTIC
464 	/*
465 	 * URGENT commands can never be ASYNC.
466 	 */
467 	if ((flags & (XS_CTL_URGENT|XS_CTL_ASYNC)) ==
468 	    (XS_CTL_URGENT|XS_CTL_ASYNC)) {
469 		scsipi_printaddr(periph);
470 		printf("URGENT and ASYNC\n");
471 		panic("scsipi_get_xs");
472 	}
473 #endif
474 
475 	/*
476 	 * Wait for a command opening to become available.  Rules:
477 	 *
478 	 *	- All xfers must wait for an available opening.
479 	 *	  Exception: URGENT xfers can proceed when
480 	 *	  active == openings, because we use the opening
481 	 *	  of the command we're recovering for.
482 	 *	- If the periph has sense pending, only URGENT & REQSENSE
483 	 *	  xfers may proceed.
484 	 *
485 	 *	- If the periph is recovering, only URGENT xfers may
486 	 *	  proceed.
487 	 *
488 	 *	- If the periph is currently executing a recovery
489 	 *	  command, URGENT commands must block, because only
490 	 *	  one recovery command can execute at a time.
491 	 */
492 	if (lock)
493 		mutex_enter(chan_mtx(periph->periph_channel));
494 	for (;;) {
495 		if (flags & XS_CTL_URGENT) {
496 			if (periph->periph_active > periph->periph_openings)
497 				goto wait_for_opening;
498 			if (periph->periph_flags & PERIPH_SENSE) {
499 				if ((flags & XS_CTL_REQSENSE) == 0)
500 					goto wait_for_opening;
501 			} else {
502 				if ((periph->periph_flags &
503 				    PERIPH_RECOVERY_ACTIVE) != 0)
504 					goto wait_for_opening;
505 				periph->periph_flags |= PERIPH_RECOVERY_ACTIVE;
506 			}
507 			break;
508 		}
509 		if (periph->periph_active >= periph->periph_openings ||
510 		    (periph->periph_flags & PERIPH_RECOVERING) != 0)
511 			goto wait_for_opening;
512 		periph->periph_active++;
513 		KASSERT(mutex_owned(chan_mtx(periph->periph_channel)));
514 		break;
515 
516  wait_for_opening:
517 		if (flags & XS_CTL_NOSLEEP) {
518 			KASSERT(!lock);
519 			return NULL;
520 		}
521 		KASSERT(lock);
522 		SC_DEBUG(periph, SCSIPI_DB3, ("sleeping\n"));
523 		periph->periph_flags |= PERIPH_WAITING;
524 		cv_wait(periph_cv_periph(periph),
525 		    chan_mtx(periph->periph_channel));
526 	}
527 	if (lock)
528 		mutex_exit(chan_mtx(periph->periph_channel));
529 
530 	SC_DEBUG(periph, SCSIPI_DB3, ("calling pool_get\n"));
531 	xs = pool_get(&scsipi_xfer_pool,
532 	    ((flags & XS_CTL_NOSLEEP) != 0 ? PR_NOWAIT : PR_WAITOK));
533 	if (xs == NULL) {
534 		if (lock)
535 			mutex_enter(chan_mtx(periph->periph_channel));
536 		if (flags & XS_CTL_URGENT) {
537 			if ((flags & XS_CTL_REQSENSE) == 0)
538 				periph->periph_flags &= ~PERIPH_RECOVERY_ACTIVE;
539 		} else
540 			periph->periph_active--;
541 		if (lock)
542 			mutex_exit(chan_mtx(periph->periph_channel));
543 		scsipi_printaddr(periph);
544 		printf("unable to allocate %sscsipi_xfer\n",
545 		    (flags & XS_CTL_URGENT) ? "URGENT " : "");
546 	}
547 
548 	SC_DEBUG(periph, SCSIPI_DB3, ("returning\n"));
549 
550 	if (xs != NULL) {
551 		memset(xs, 0, sizeof(*xs));
552 		callout_init(&xs->xs_callout, 0);
553 		xs->xs_periph = periph;
554 		xs->xs_control = flags;
555 		xs->xs_status = 0;
556 		if ((flags & XS_CTL_NOSLEEP) == 0)
557 			mutex_enter(chan_mtx(periph->periph_channel));
558 		TAILQ_INSERT_TAIL(&periph->periph_xferq, xs, device_q);
559 		KASSERT(mutex_owned(chan_mtx(periph->periph_channel)));
560 		if ((flags & XS_CTL_NOSLEEP) == 0)
561 			mutex_exit(chan_mtx(periph->periph_channel));
562 	}
563 	return xs;
564 }
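
/*
 * Editor's sketch of a caller (illustrative only): with the channel
 * lock already held, XS_CTL_NOSLEEP is mandatory and the caller must
 * cope with failure:
 *
 *	xs = scsipi_get_xs(periph, XS_CTL_NOSLEEP);
 *	if (xs == NULL)
 *		return;		(no opening, or pool exhausted)
 *
 * Without the lock, the call may sleep until both an opening and an
 * xfer descriptor are available.
 */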
565 
566 /*
567  * scsipi_put_xs:
568  *
569  *	Release an xfer descriptor, decreasing the outstanding command
570  *	count for the peripheral.  If there is a thread waiting for
571  *	an opening, wake it up.  If not, kick any queued I/O the
572  *	peripheral may have.
573  *
574  *	NOTE: Must be called with channel lock held
575  */
576 void
577 scsipi_put_xs(struct scsipi_xfer *xs)
578 {
579 	struct scsipi_periph *periph = xs->xs_periph;
580 	int flags = xs->xs_control;
581 
582 	SDT_PROBE1(scsi, base, xfer, free,  xs);
583 	SC_DEBUG(periph, SCSIPI_DB3, ("scsipi_free_xs\n"));
584 	KASSERT(mutex_owned(chan_mtx(periph->periph_channel)));
585 
586 	TAILQ_REMOVE(&periph->periph_xferq, xs, device_q);
587 	callout_destroy(&xs->xs_callout);
588 	pool_put(&scsipi_xfer_pool, xs);
589 
590 #ifdef DIAGNOSTIC
591 	if ((periph->periph_flags & PERIPH_RECOVERY_ACTIVE) != 0 &&
592 	    periph->periph_active == 0) {
593 		scsipi_printaddr(periph);
594 		printf("recovery without a command to recover for\n");
595 		panic("scsipi_put_xs");
596 	}
597 #endif
598 
599 	if (flags & XS_CTL_URGENT) {
600 		if ((flags & XS_CTL_REQSENSE) == 0)
601 			periph->periph_flags &= ~PERIPH_RECOVERY_ACTIVE;
602 	} else
603 		periph->periph_active--;
604 	if (periph->periph_active == 0 &&
605 	    (periph->periph_flags & PERIPH_WAITDRAIN) != 0) {
606 		periph->periph_flags &= ~PERIPH_WAITDRAIN;
607 		cv_broadcast(periph_cv_active(periph));
608 	}
609 
610 	if (periph->periph_flags & PERIPH_WAITING) {
611 		periph->periph_flags &= ~PERIPH_WAITING;
612 		cv_broadcast(periph_cv_periph(periph));
613 	} else {
614 		if (periph->periph_switch->psw_start != NULL &&
615 		    device_is_active(periph->periph_dev)) {
616 			SC_DEBUG(periph, SCSIPI_DB2,
617 			    ("calling private start()\n"));
618 			(*periph->periph_switch->psw_start)(periph);
619 		}
620 	}
621 }
622 
623 /*
624  * scsipi_channel_freeze:
625  *
626  *	Freeze a channel's xfer queue.
627  */
628 void
629 scsipi_channel_freeze(struct scsipi_channel *chan, int count)
630 {
631 	bool lock = chan_running(chan) > 0;
632 
633 	if (lock)
634 		mutex_enter(chan_mtx(chan));
635 	chan->chan_qfreeze += count;
636 	if (lock)
637 		mutex_exit(chan_mtx(chan));
638 }
639 
640 static void
641 scsipi_channel_freeze_locked(struct scsipi_channel *chan, int count)
642 {
643 
644 	chan->chan_qfreeze += count;
645 }
646 
647 /*
648  * scsipi_channel_thaw:
649  *
650  *	Thaw a channel's xfer queue.
651  */
652 void
653 scsipi_channel_thaw(struct scsipi_channel *chan, int count)
654 {
655 	bool lock = chan_running(chan) > 0;
656 
657 	if (lock)
658 		mutex_enter(chan_mtx(chan));
659 	chan->chan_qfreeze -= count;
660 	/*
661 	 * Don't let the freeze count go negative.
662 	 *
663 	 * Presumably the adapter driver could keep track of this,
664 	 * but it might just be easier to do this here so as to allow
665 	 * multiple callers, including those outside the adapter driver.
666 	 */
667 	if (chan->chan_qfreeze < 0) {
668 		chan->chan_qfreeze = 0;
669 	}
670 	if (lock)
671 		mutex_exit(chan_mtx(chan));
672 
673 	/*
674 	 * Don't kick the queue until the channel is running.
675 	 */
676 	if (!lock)
677 		return;
678 
679 	/*
680 	 * Kick the channel's queue here.  Note, we may be running in
681 	 * interrupt context (softclock or HBA's interrupt), so the adapter
682 	 * driver had better not sleep.
683 	 */
684 	if (chan->chan_qfreeze == 0)
685 		scsipi_run_queue(chan);
686 }
687 
688 /*
689  * scsipi_channel_timed_thaw:
690  *
691  *	Thaw a channel after some time has expired. This will also
692  * 	run the channel's queue if the freeze count has reached 0.
693  */
694 void
695 scsipi_channel_timed_thaw(void *arg)
696 {
697 	struct scsipi_channel *chan = arg;
698 
699 	scsipi_channel_thaw(chan, 1);
700 }
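
/*
 * Editor's sketch (sc->sc_callout is a hypothetical callout in an
 * adapter's softc): a driver backing off after a transient failure
 * would pair a freeze with a timed thaw:
 *
 *	scsipi_channel_freeze(chan, 1);
 *	callout_reset(&sc->sc_callout, hz,
 *	    scsipi_channel_timed_thaw, chan);
 */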
701 
702 /*
703  * scsipi_periph_freeze:
704  *
705  *	Freeze a device's xfer queue.
706  */
707 void
708 scsipi_periph_freeze_locked(struct scsipi_periph *periph, int count)
709 {
710 
711 	periph->periph_qfreeze += count;
712 }
713 
714 /*
715  * scsipi_periph_thaw:
716  *
717  *	Thaw a device's xfer queue.
718  */
719 void
720 scsipi_periph_thaw_locked(struct scsipi_periph *periph, int count)
721 {
722 
723 	periph->periph_qfreeze -= count;
724 #ifdef DIAGNOSTIC
725 	if (periph->periph_qfreeze < 0) {
726 		static const char pc[] = "periph freeze count < 0";
727 		scsipi_printaddr(periph);
728 		printf("%s\n", pc);
729 		panic(pc);
730 	}
731 #endif
732 	if (periph->periph_qfreeze == 0 &&
733 	    (periph->periph_flags & PERIPH_WAITING) != 0)
734 		cv_broadcast(periph_cv_periph(periph));
735 }
736 
737 void
738 scsipi_periph_freeze(struct scsipi_periph *periph, int count)
739 {
740 
741 	mutex_enter(chan_mtx(periph->periph_channel));
742 	scsipi_periph_freeze_locked(periph, count);
743 	mutex_exit(chan_mtx(periph->periph_channel));
744 }
745 
746 void
747 scsipi_periph_thaw(struct scsipi_periph *periph, int count)
748 {
749 
750 	mutex_enter(chan_mtx(periph->periph_channel));
751 	scsipi_periph_thaw_locked(periph, count);
752 	mutex_exit(chan_mtx(periph->periph_channel));
753 }
754 
755 /*
756  * scsipi_periph_timed_thaw:
757  *
758  *	Thaw a device after some time has expired.
759  */
760 void
761 scsipi_periph_timed_thaw(void *arg)
762 {
763 	struct scsipi_periph *periph = arg;
764 	struct scsipi_channel *chan = periph->periph_channel;
765 
766 	callout_stop(&periph->periph_callout);
767 
768 	mutex_enter(chan_mtx(chan));
769 	scsipi_periph_thaw_locked(periph, 1);
770 	if ((periph->periph_channel->chan_flags & SCSIPI_CHAN_TACTIVE) == 0) {
771 		/*
772 		 * Kick the channel's queue here.  Note, we're running in
773 		 * interrupt context (softclock), so the adapter driver
774 		 * had better not sleep.
775 		 */
776 		mutex_exit(chan_mtx(chan));
777 		scsipi_run_queue(periph->periph_channel);
778 	} else {
779 		/*
780 		 * Tell the completion thread to kick the channel's queue here.
781 		 */
782 		periph->periph_channel->chan_tflags |= SCSIPI_CHANT_KICK;
783 		cv_broadcast(chan_cv_complete(chan));
784 		mutex_exit(chan_mtx(chan));
785 	}
786 }
787 
788 /*
789  * scsipi_wait_drain:
790  *
791  *	Wait for a periph's pending xfers to drain.
792  */
793 void
794 scsipi_wait_drain(struct scsipi_periph *periph)
795 {
796 	struct scsipi_channel *chan = periph->periph_channel;
797 
798 	mutex_enter(chan_mtx(chan));
799 	while (periph->periph_active != 0) {
800 		periph->periph_flags |= PERIPH_WAITDRAIN;
801 		cv_wait(periph_cv_active(periph), chan_mtx(chan));
802 	}
803 	mutex_exit(chan_mtx(chan));
804 }
805 
806 /*
807  * scsipi_kill_pending:
808  *
809  *	Kill off all pending xfers for a periph.
810  *
811  *	NOTE: Must be called with channel lock held
812  */
813 void
814 scsipi_kill_pending(struct scsipi_periph *periph)
815 {
816 	struct scsipi_channel *chan = periph->periph_channel;
817 
818 	(*chan->chan_bustype->bustype_kill_pending)(periph);
819 	while (periph->periph_active != 0) {
820 		periph->periph_flags |= PERIPH_WAITDRAIN;
821 		cv_wait(periph_cv_active(periph), chan_mtx(chan));
822 	}
823 }
824 
825 /*
826  * scsipi_print_cdb:
827  * prints a command descriptor block (for debugging purposes, error
828  * messages, SCSIVERBOSE, ...)
829  */
830 void
831 scsipi_print_cdb(struct scsipi_generic *cmd)
832 {
833 	int i, j;
834 
835 	printf("0x%02x", cmd->opcode);
836 
837 	switch (CDB_GROUPID(cmd->opcode)) {
838 	case CDB_GROUPID_0:
839 		j = CDB_GROUP0;
840 		break;
841 	case CDB_GROUPID_1:
842 		j = CDB_GROUP1;
843 		break;
844 	case CDB_GROUPID_2:
845 		j = CDB_GROUP2;
846 		break;
847 	case CDB_GROUPID_3:
848 		j = CDB_GROUP3;
849 		break;
850 	case CDB_GROUPID_4:
851 		j = CDB_GROUP4;
852 		break;
853 	case CDB_GROUPID_5:
854 		j = CDB_GROUP5;
855 		break;
856 	case CDB_GROUPID_6:
857 		j = CDB_GROUP6;
858 		break;
859 	case CDB_GROUPID_7:
860 		j = CDB_GROUP7;
861 		break;
862 	default:
863 		j = 0;
864 	}
865 	if (j == 0)
866 		j = sizeof (cmd->bytes);
867 	for (i = 0; i < j-1; i++) /* already done the opcode */
868 		printf(" %02x", cmd->bytes[i]);
869 }
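
/*
 * Editor's example: a READ(10) (opcode 0x28, CDB_GROUPID_1, hence
 * CDB_GROUP1 == 10 bytes) for 8 blocks at LBA 0x1234 would print as
 *
 *	0x28 00 00 00 12 34 00 00 08 00
 */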
870 
871 /*
872  * scsipi_interpret_sense:
873  *
874  *	Look at the returned sense and act on the error, determining
875  *	the unix error number to pass back.  (0 = report no error)
876  *
877  *	NOTE: If we return ERESTART, we are expected to have
878  *	thawed the device!
879  *
880  *	THIS IS THE DEFAULT ERROR HANDLER FOR SCSI DEVICES.
881  */
882 int
883 scsipi_interpret_sense(struct scsipi_xfer *xs)
884 {
885 	struct scsi_sense_data *sense;
886 	struct scsipi_periph *periph = xs->xs_periph;
887 	u_int8_t key;
888 	int error;
889 	u_int32_t info;
890 	static const char *error_mes[] = {
891 		"soft error (corrected)",
892 		"not ready", "medium error",
893 		"non-media hardware failure", "illegal request",
894 		"unit attention", "readonly device",
895 		"no data found", "vendor unique",
896 		"copy aborted", "command aborted",
897 		"search returned equal", "volume overflow",
898 		"verify miscompare", "unknown error key"
899 	};
900 
901 	sense = &xs->sense.scsi_sense;
902 #ifdef SCSIPI_DEBUG
903 	if (periph->periph_dbflags & SCSIPI_DB1) {
904 		int count, len;
905 		scsipi_printaddr(periph);
906 		printf(" sense debug information:\n");
907 		printf("\tcode 0x%x valid %d\n",
908 			SSD_RCODE(sense->response_code),
909 			sense->response_code & SSD_RCODE_VALID ? 1 : 0);
910 		printf("\tseg 0x%x key 0x%x ili 0x%x eom 0x%x fmark 0x%x\n",
911 			sense->segment,
912 			SSD_SENSE_KEY(sense->flags),
913 			sense->flags & SSD_ILI ? 1 : 0,
914 			sense->flags & SSD_EOM ? 1 : 0,
915 			sense->flags & SSD_FILEMARK ? 1 : 0);
916 		printf("\ninfo: 0x%x 0x%x 0x%x 0x%x followed by %d "
917 			"extra bytes\n",
918 			sense->info[0],
919 			sense->info[1],
920 			sense->info[2],
921 			sense->info[3],
922 			sense->extra_len);
923 		len = SSD_ADD_BYTES_LIM(sense);
924 		printf("\textra (up to %d bytes): ", len);
925 		for (count = 0; count < len; count++)
926 			printf("0x%x ", sense->csi[count]);
927 		printf("\n");
928 	}
929 #endif
930 
931 	/*
932 	 * If the periph has its own error handler, call it first.
933 	 * If it returns a legit error value, return that, otherwise
934 	 * it wants us to continue with normal error processing.
935 	 */
936 	if (periph->periph_switch->psw_error != NULL) {
937 		SC_DEBUG(periph, SCSIPI_DB2,
938 		    ("calling private err_handler()\n"));
939 		error = (*periph->periph_switch->psw_error)(xs);
940 		if (error != EJUSTRETURN)
941 			return error;
942 	}
943 	/* otherwise use the default */
944 	switch (SSD_RCODE(sense->response_code)) {
945 
946 		/*
947 		 * Old SCSI-1 and SASI devices respond with
948 		 * codes other than 70.
949 		 */
950 	case 0x00:		/* no error (command completed OK) */
951 		return 0;
952 	case 0x04:		/* drive not ready after it was selected */
953 		if ((periph->periph_flags & PERIPH_REMOVABLE) != 0)
954 			periph->periph_flags &= ~PERIPH_MEDIA_LOADED;
955 		if ((xs->xs_control & XS_CTL_IGNORE_NOT_READY) != 0)
956 			return 0;
957 		/* XXX - display some sort of error here? */
958 		return EIO;
959 	case 0x20:		/* invalid command */
960 		if ((xs->xs_control &
961 		     XS_CTL_IGNORE_ILLEGAL_REQUEST) != 0)
962 			return 0;
963 		return EINVAL;
964 	case 0x25:		/* invalid LUN (Adaptec ACB-4000) */
965 		return EACCES;
966 
967 		/*
968 		 * If it's code 70, use the extended stuff and
969 		 * interpret the key
970 		 */
971 	case 0x71:		/* delayed error */
972 		scsipi_printaddr(periph);
973 		key = SSD_SENSE_KEY(sense->flags);
974 		printf(" DEFERRED ERROR, key = 0x%x\n", key);
975 		/* FALLTHROUGH */
976 	case 0x70:
977 		if ((sense->response_code & SSD_RCODE_VALID) != 0)
978 			info = _4btol(sense->info);
979 		else
980 			info = 0;
981 		key = SSD_SENSE_KEY(sense->flags);
982 
983 		switch (key) {
984 		case SKEY_NO_SENSE:
985 		case SKEY_RECOVERED_ERROR:
986 			if (xs->resid == xs->datalen && xs->datalen) {
987 				/*
988 				 * Why is this here?
989 				 */
990 				xs->resid = 0;	/* not short read */
991 			}
992 			error = 0;
993 			break;
994 		case SKEY_EQUAL:
995 			error = 0;
996 			break;
997 		case SKEY_NOT_READY:
998 			if ((periph->periph_flags & PERIPH_REMOVABLE) != 0)
999 				periph->periph_flags &= ~PERIPH_MEDIA_LOADED;
1000 			if ((xs->xs_control & XS_CTL_IGNORE_NOT_READY) != 0)
1001 				return 0;
1002 			if (sense->asc == 0x3A) {
1003 				error = ENODEV; /* Medium not present */
1004 				if (xs->xs_control & XS_CTL_SILENT_NODEV)
1005 					return error;
1006 			} else
1007 				error = EIO;
1008 			if ((xs->xs_control & XS_CTL_SILENT) != 0)
1009 				return error;
1010 			break;
1011 		case SKEY_ILLEGAL_REQUEST:
1012 			if ((xs->xs_control &
1013 			     XS_CTL_IGNORE_ILLEGAL_REQUEST) != 0)
1014 				return 0;
1015 			/*
1016 			 * Handle the case where a device reports
1017 			 * Logical Unit Not Supported during discovery.
1018 			 */
1019 			if ((xs->xs_control & XS_CTL_DISCOVERY) != 0 &&
1020 			    sense->asc == 0x25 &&
1021 			    sense->ascq == 0x00)
1022 				return EINVAL;
1023 			if ((xs->xs_control & XS_CTL_SILENT) != 0)
1024 				return EIO;
1025 			error = EINVAL;
1026 			break;
1027 		case SKEY_UNIT_ATTENTION:
1028 			if (sense->asc == 0x29 &&
1029 			    sense->ascq == 0x00) {
1030 				/* device or bus reset */
1031 				return ERESTART;
1032 			}
1033 			if ((periph->periph_flags & PERIPH_REMOVABLE) != 0)
1034 				periph->periph_flags &= ~PERIPH_MEDIA_LOADED;
1035 			if ((xs->xs_control &
1036 			     XS_CTL_IGNORE_MEDIA_CHANGE) != 0 ||
1037 				/* XXX Should reupload any transient state. */
1038 				(periph->periph_flags &
1039 				 PERIPH_REMOVABLE) == 0) {
1040 				return ERESTART;
1041 			}
1042 			if ((xs->xs_control & XS_CTL_SILENT) != 0)
1043 				return EIO;
1044 			error = EIO;
1045 			break;
1046 		case SKEY_DATA_PROTECT:
1047 			error = EROFS;
1048 			break;
1049 		case SKEY_BLANK_CHECK:
1050 			error = 0;
1051 			break;
1052 		case SKEY_ABORTED_COMMAND:
1053 			if (xs->xs_retries != 0) {
1054 				xs->xs_retries--;
1055 				error = ERESTART;
1056 			} else
1057 				error = EIO;
1058 			break;
1059 		case SKEY_VOLUME_OVERFLOW:
1060 			error = ENOSPC;
1061 			break;
1062 		default:
1063 			error = EIO;
1064 			break;
1065 		}
1066 
1067 		/* Print verbose decode if appropriate and possible */
1068 		if ((key == 0) ||
1069 		    ((xs->xs_control & XS_CTL_SILENT) != 0) ||
1070 		    (scsipi_print_sense(xs, 0) != 0))
1071 			return error;
1072 
1073 		/* Print brief(er) sense information */
1074 		scsipi_printaddr(periph);
1075 		printf("%s", error_mes[key - 1]);
1076 		if ((sense->response_code & SSD_RCODE_VALID) != 0) {
1077 			switch (key) {
1078 			case SKEY_NOT_READY:
1079 			case SKEY_ILLEGAL_REQUEST:
1080 			case SKEY_UNIT_ATTENTION:
1081 			case SKEY_DATA_PROTECT:
1082 				break;
1083 			case SKEY_BLANK_CHECK:
1084 				printf(", requested size: %d (decimal)",
1085 				    info);
1086 				break;
1087 			case SKEY_ABORTED_COMMAND:
1088 				if (xs->xs_retries)
1089 					printf(", retrying");
1090 				printf(", cmd 0x%x, info 0x%x",
1091 				    xs->cmd->opcode, info);
1092 				break;
1093 			default:
1094 				printf(", info = %d (decimal)", info);
1095 			}
1096 		}
1097 		if (sense->extra_len != 0) {
1098 			int n;
1099 			printf(", data =");
1100 			for (n = 0; n < sense->extra_len; n++)
1101 				printf(" %02x",
1102 				    sense->csi[n]);
1103 		}
1104 		printf("\n");
1105 		return error;
1106 
1107 	/*
1108 	 * Some other code, just report it
1109 	 */
1110 	default:
1111 #if defined(SCSIDEBUG) || defined(DEBUG)
1112 	{
1113 		static const char *uc = "undecodable sense error";
1114 		int i;
1115 		u_int8_t *cptr = (u_int8_t *) sense;
1116 		scsipi_printaddr(periph);
1117 		if (xs->cmd == &xs->cmdstore) {
1118 			printf("%s for opcode 0x%x, data=",
1119 			    uc, xs->cmdstore.opcode);
1120 		} else {
1121 			printf("%s, data=", uc);
1122 		}
1123 		for (i = 0; i < sizeof (*sense); i++)
1124 			printf(" 0x%02x", *(cptr++) & 0xff);
1125 		printf("\n");
1126 	}
1127 #else
1128 		scsipi_printaddr(periph);
1129 		printf("Sense Error Code 0x%x",
1130 			SSD_RCODE(sense->response_code));
1131 		if ((sense->response_code & SSD_RCODE_VALID) != 0) {
1132 			struct scsi_sense_data_unextended *usense =
1133 			    (struct scsi_sense_data_unextended *)sense;
1134 			printf(" at block no. %d (decimal)",
1135 			    _3btol(usense->block));
1136 		}
1137 		printf("\n");
1138 #endif
1139 		return EIO;
1140 	}
1141 }
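
/*
 * Editor's worked example: fixed-format sense data with
 * response_code 0x70, sense key SKEY_NOT_READY (0x2) and asc 0x3A
 * ("medium not present") takes the 0x70 branch above and maps to
 * ENODEV; the same key with any other asc maps to EIO.
 */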
1142 
1143 /*
1144  * scsipi_test_unit_ready:
1145  *
1146  *	Issue a `test unit ready' request.
1147  */
1148 int
1149 scsipi_test_unit_ready(struct scsipi_periph *periph, int flags)
1150 {
1151 	struct scsi_test_unit_ready cmd;
1152 	int retries;
1153 
1154 	/* some ATAPI drives don't support TEST UNIT READY. Sigh */
1155 	if (periph->periph_quirks & PQUIRK_NOTUR)
1156 		return 0;
1157 
1158 	if (flags & XS_CTL_DISCOVERY)
1159 		retries = 0;
1160 	else
1161 		retries = SCSIPIRETRIES;
1162 
1163 	memset(&cmd, 0, sizeof(cmd));
1164 	cmd.opcode = SCSI_TEST_UNIT_READY;
1165 
1166 	return scsipi_command(periph, (void *)&cmd, sizeof(cmd), 0, 0,
1167 	    retries, 10000, NULL, flags);
1168 }
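
/*
 * Editor's sketch (illustrative only): a disk driver probing for
 * media typically ignores the errors that merely mean "nothing
 * loaded":
 *
 *	error = scsipi_test_unit_ready(periph,
 *	    XS_CTL_IGNORE_ILLEGAL_REQUEST | XS_CTL_IGNORE_NOT_READY |
 *	    XS_CTL_IGNORE_MEDIA_CHANGE);
 */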
1169 
1170 static const struct scsipi_inquiry3_pattern {
1171 	const char vendor[8];
1172 	const char product[16];
1173 	const char revision[4];
1174 } scsipi_inquiry3_quirk[] = {
1175 	{ "ES-6600 ", "", "" },
1176 };
1177 
1178 static int
1179 scsipi_inquiry3_ok(const struct scsipi_inquiry_data *ib)
1180 {
1181 	for (size_t i = 0; i < __arraycount(scsipi_inquiry3_quirk); i++) {
1182 		const struct scsipi_inquiry3_pattern *q =
1183 		    &scsipi_inquiry3_quirk[i];
1184 #define MATCH(field) \
1185     (q->field[0] ? memcmp(ib->field, q->field, sizeof(ib->field)) == 0 : 1)
1186 		if (MATCH(vendor) && MATCH(product) && MATCH(revision))
1187 			return 0;
1188 	}
1189 	return 1;
1190 }
1191 
1192 /*
1193  * scsipi_inquire:
1194  *
1195  *	Ask the device about itself.
1196  */
1197 int
1198 scsipi_inquire(struct scsipi_periph *periph, struct scsipi_inquiry_data *inqbuf,
1199     int flags)
1200 {
1201 	struct scsipi_inquiry cmd;
1202 	int error;
1203 	int retries;
1204 
1205 	if (flags & XS_CTL_DISCOVERY)
1206 		retries = 0;
1207 	else
1208 		retries = SCSIPIRETRIES;
1209 
1210 	/*
1211 	 * If we request more data than the device can provide, it SHOULD just
1212 	 * return a short response.  However, some devices error with an
1213 	 * ILLEGAL REQUEST sense code, and yet others have even more special
1214  * failure modes (such as the GL641USB flash adapter, which goes loony
1215 	 * and sends corrupted CRCs).  To work around this, and to bring our
1216 	 * behavior more in line with other OSes, we do a shorter inquiry,
1217 	 * covering all the SCSI-2 information, first, and then request more
1218 	 * data iff the "additional length" field indicates there is more.
1219 	 * - mycroft, 2003/10/16
1220 	 */
1221 	memset(&cmd, 0, sizeof(cmd));
1222 	cmd.opcode = INQUIRY;
1223 	cmd.length = SCSIPI_INQUIRY_LENGTH_SCSI2;
1224 	error = scsipi_command(periph, (void *)&cmd, sizeof(cmd),
1225 	    (void *)inqbuf, SCSIPI_INQUIRY_LENGTH_SCSI2, retries,
1226 	    10000, NULL, flags | XS_CTL_DATA_IN);
1227 	if (!error &&
1228 	    inqbuf->additional_length > SCSIPI_INQUIRY_LENGTH_SCSI2 - 4) {
1229 	    if (scsipi_inquiry3_ok(inqbuf)) {
1230 #if 0
1231 printf("inquire: addlen=%d, retrying\n", inqbuf->additional_length);
1232 #endif
1233 		cmd.length = SCSIPI_INQUIRY_LENGTH_SCSI3;
1234 		error = scsipi_command(periph, (void *)&cmd, sizeof(cmd),
1235 		    (void *)inqbuf, SCSIPI_INQUIRY_LENGTH_SCSI3, retries,
1236 		    10000, NULL, flags | XS_CTL_DATA_IN);
1237 #if 0
1238 printf("inquire: error=%d\n", error);
1239 #endif
1240 	    }
1241 	}
1242 
1243 #ifdef SCSI_OLD_NOINQUIRY
1244 	/*
1245 	 * Kludge for the Adaptec ACB-4000 SCSI->MFM translator.
1246 	 * This board doesn't support the INQUIRY command at all.
1247 	 */
1248 	if (error == EINVAL || error == EACCES) {
1249 		/*
1250 		 * Conjure up an INQUIRY response.
1251 		 */
1252 		inqbuf->device = (error == EINVAL ?
1253 			 SID_QUAL_LU_PRESENT :
1254 			 SID_QUAL_LU_NOTPRESENT) | T_DIRECT;
1255 		inqbuf->dev_qual2 = 0;
1256 		inqbuf->version = 0;
1257 		inqbuf->response_format = SID_FORMAT_SCSI1;
1258 		inqbuf->additional_length = SCSIPI_INQUIRY_LENGTH_SCSI2 - 4;
1259 		inqbuf->flags1 = inqbuf->flags2 = inqbuf->flags3 = 0;
1260 		memcpy(inqbuf->vendor, "ADAPTEC ACB-4000            ", 28);
1261 		error = 0;
1262 	}
1263 
1264 	/*
1265 	 * Kludge for the Emulex MT-02 SCSI->QIC translator.
1266 	 * This board gives an empty response to an INQUIRY command.
1267 	 */
1268 	else if (error == 0 &&
1269 	    inqbuf->device == (SID_QUAL_LU_PRESENT | T_DIRECT) &&
1270 	    inqbuf->dev_qual2 == 0 &&
1271 	    inqbuf->version == 0 &&
1272 	    inqbuf->response_format == SID_FORMAT_SCSI1) {
1273 		/*
1274 		 * Fill out the INQUIRY response.
1275 		 */
1276 		inqbuf->device = (SID_QUAL_LU_PRESENT | T_SEQUENTIAL);
1277 		inqbuf->dev_qual2 = SID_REMOVABLE;
1278 		inqbuf->additional_length = SCSIPI_INQUIRY_LENGTH_SCSI2 - 4;
1279 		inqbuf->flags1 = inqbuf->flags2 = inqbuf->flags3 = 0;
1280 		memcpy(inqbuf->vendor, "EMULEX  MT-02 QIC           ", 28);
1281 	}
1282 #endif /* SCSI_OLD_NOINQUIRY */
1283 
1284 	return error;
1285 }
1286 
1287 /*
1288  * scsipi_prevent:
1289  *
1290  *	Prevent or allow the user to remove the media
1291  */
1292 int
1293 scsipi_prevent(struct scsipi_periph *periph, int type, int flags)
1294 {
1295 	struct scsi_prevent_allow_medium_removal cmd;
1296 
1297 	if (periph->periph_quirks & PQUIRK_NODOORLOCK)
1298 		return 0;
1299 
1300 	memset(&cmd, 0, sizeof(cmd));
1301 	cmd.opcode = SCSI_PREVENT_ALLOW_MEDIUM_REMOVAL;
1302 	cmd.how = type;
1303 
1304 	return (scsipi_command(periph, (void *)&cmd, sizeof(cmd), 0, 0,
1305 	    SCSIPIRETRIES, 5000, NULL, flags));
1306 }
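
/*
 * Editor's sketch (illustrative only): lock the door of a removable
 * device while it is open, and unlock it again on close:
 *
 *	error = scsipi_prevent(periph, SPAMR_PREVENT_DT, 0);
 *	...
 *	(void)scsipi_prevent(periph, SPAMR_ALLOW, 0);
 */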
1307 
1308 /*
1309  * scsipi_start:
1310  *
1311  *	Send a START UNIT.
1312  */
1313 int
1314 scsipi_start(struct scsipi_periph *periph, int type, int flags)
1315 {
1316 	struct scsipi_start_stop cmd;
1317 
1318 	memset(&cmd, 0, sizeof(cmd));
1319 	cmd.opcode = START_STOP;
1320 	cmd.byte2 = 0x00;
1321 	cmd.how = type;
1322 
1323 	return scsipi_command(periph, (void *)&cmd, sizeof(cmd), 0, 0,
1324 	    SCSIPIRETRIES, (type & SSS_START) ? 60000 : 10000, NULL, flags);
1325 }
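
/*
 * Editor's note: spinning a unit up (SSS_START) is given a full
 * minute above, stopping it only 10 seconds.  A typical call to spin
 * up a disk:
 *
 *	error = scsipi_start(periph, SSS_START, 0);
 */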
1326 
1327 /*
1328  * scsipi_mode_sense, scsipi_mode_sense_big:
1329  *	get a sense page from a device
1330  */
1331 
1332 int
1333 scsipi_mode_sense(struct scsipi_periph *periph, int byte2, int page,
1334     struct scsi_mode_parameter_header_6 *data, int len, int flags, int retries,
1335     int timeout)
1336 {
1337 	struct scsi_mode_sense_6 cmd;
1338 
1339 	memset(&cmd, 0, sizeof(cmd));
1340 	cmd.opcode = SCSI_MODE_SENSE_6;
1341 	cmd.byte2 = byte2;
1342 	cmd.page = page;
1343 	cmd.length = len & 0xff;
1344 
1345 	return scsipi_command(periph, (void *)&cmd, sizeof(cmd),
1346 	    (void *)data, len, retries, timeout, NULL, flags | XS_CTL_DATA_IN);
1347 }
1348 
1349 int
1350 scsipi_mode_sense_big(struct scsipi_periph *periph, int byte2, int page,
1351     struct scsi_mode_parameter_header_10 *data, int len, int flags, int retries,
1352     int timeout)
1353 {
1354 	struct scsi_mode_sense_10 cmd;
1355 
1356 	memset(&cmd, 0, sizeof(cmd));
1357 	cmd.opcode = SCSI_MODE_SENSE_10;
1358 	cmd.byte2 = byte2;
1359 	cmd.page = page;
1360 	_lto2b(len, cmd.length);
1361 
1362 	return scsipi_command(periph, (void *)&cmd, sizeof(cmd),
1363 	    (void *)data, len, retries, timeout, NULL, flags | XS_CTL_DATA_IN);
1364 }
1365 
1366 int
1367 scsipi_mode_select(struct scsipi_periph *periph, int byte2,
1368     struct scsi_mode_parameter_header_6 *data, int len, int flags, int retries,
1369     int timeout)
1370 {
1371 	struct scsi_mode_select_6 cmd;
1372 
1373 	memset(&cmd, 0, sizeof(cmd));
1374 	cmd.opcode = SCSI_MODE_SELECT_6;
1375 	cmd.byte2 = byte2;
1376 	cmd.length = len & 0xff;
1377 
1378 	return scsipi_command(periph, (void *)&cmd, sizeof(cmd),
1379 	    (void *)data, len, retries, timeout, NULL, flags | XS_CTL_DATA_OUT);
1380 }
1381 
1382 int
1383 scsipi_mode_select_big(struct scsipi_periph *periph, int byte2,
1384     struct scsi_mode_parameter_header_10 *data, int len, int flags, int retries,
1385     int timeout)
1386 {
1387 	struct scsi_mode_select_10 cmd;
1388 
1389 	memset(&cmd, 0, sizeof(cmd));
1390 	cmd.opcode = SCSI_MODE_SELECT_10;
1391 	cmd.byte2 = byte2;
1392 	_lto2b(len, cmd.length);
1393 
1394 	return scsipi_command(periph, (void *)&cmd, sizeof(cmd),
1395 	    (void *)data, len, retries, timeout, NULL, flags | XS_CTL_DATA_OUT);
1396 }
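
/*
 * Editor's sketch (illustrative only; buffer layout assumed from the
 * SCSI spec): fetching the caching mode page (page 8) could look like
 *
 *	struct {
 *		struct scsi_mode_parameter_header_6 header;
 *		uint8_t page[28];
 *	} buf;
 *
 *	error = scsipi_mode_sense(periph, 0, 8, &buf.header,
 *	    sizeof(buf), 0, SCSIPIRETRIES, 10000);
 */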
1397 
1398 /*
1399  * scsipi_get_opcodeinfo:
1400  *
1401  * Query the device for supported commands and their timeouts,
1402  * building a timeout lookup table if timeout information is available.
1403  */
1404 void
1405 scsipi_get_opcodeinfo(struct scsipi_periph *periph)
1406 {
1407 	u_int8_t *data;
1408 	int len = 16*1024;
1409 	int rc;
1410 	struct scsi_repsuppopcode cmd;
1411 
1412 	/* refrain from asking for supported opcodes */
1413 	if (periph->periph_quirks & PQUIRK_NOREPSUPPOPC ||
1414 	    periph->periph_type == T_PROCESSOR || /* spec. */
1415 	    periph->periph_type == T_CDROM) /* spec. */
1416 		return;
1417 
1418 	scsipi_free_opcodeinfo(periph);
1419 
1420 	/*
1421 	 * query REPORT SUPPORTED OPERATION CODES
1422 	 * if OK
1423 	 *   enumerate all codes
1424 	 *     if timeout exists insert maximum into opcode table
1425 	 */
1426 
1427 	data = malloc(len, M_DEVBUF, M_WAITOK|M_ZERO);
1428 
1429 	memset(&cmd, 0, sizeof(cmd));
1430 	cmd.opcode = SCSI_MAINTENANCE_IN;
1431 	cmd.svcaction = RSOC_REPORT_SUPPORTED_OPCODES;
1432 	cmd.repoption = RSOC_RCTD|RSOC_ALL;
1433 	_lto4b(len, cmd.alloclen);
1434 
1435 	rc = scsipi_command(periph, (void *)&cmd, sizeof(cmd),
1436 			    (void *)data, len, 0, 1000, NULL,
1437 			    XS_CTL_DATA_IN|XS_CTL_SILENT);
1438 
1439 	if (rc == 0) {
1440 		int count;
1441 		int dlen = _4btol(data);
1442 		u_int8_t *c = data + 4;
1443 
1444 		SC_DEBUG(periph, SCSIPI_DB3,
1445 			 ("supported opcode timeout-values loaded\n"));
1446 		SC_DEBUG(periph, SCSIPI_DB3,
1447 			 ("CMD  LEN  SA    spec  nom. time  cmd timeout\n"));
1448 
1449 		struct scsipi_opcodes *tot = malloc(sizeof(struct scsipi_opcodes),
1450 		    M_DEVBUF, M_WAITOK|M_ZERO);
1451 
1452 		count = 0;
1453 		while (tot != NULL &&
1454 		       dlen >= (int)sizeof(struct scsi_repsupopcode_all_commands_descriptor)) {
1455 			struct scsi_repsupopcode_all_commands_descriptor *acd
1456 				= (struct scsi_repsupopcode_all_commands_descriptor *)c;
1457 #ifdef SCSIPI_DEBUG
1458 			int cdblen = _2btol((const u_int8_t *)&acd->cdblen);
1459 #endif
1460 			dlen -= sizeof(struct scsi_repsupopcode_all_commands_descriptor);
1461 			c += sizeof(struct scsi_repsupopcode_all_commands_descriptor);
1462 			SC_DEBUG(periph, SCSIPI_DB3,
1463 				 ("0x%02x(%2d) ", acd->opcode, cdblen));
1464 
1465 			tot->opcode_info[acd->opcode].ti_flags = SCSIPI_TI_VALID;
1466 
1467 			if (acd->flags & RSOC_ACD_SERVACTV) {
1468 				SC_DEBUGN(periph, SCSIPI_DB3,
1469 					 ("0x%02x%02x ",
1470 					  acd->serviceaction[0],
1471 					  acd->serviceaction[1]));
1472 			} else {
1473 				SC_DEBUGN(periph, SCSIPI_DB3, ("       "));
1474 			}
1475 
1476 			if (acd->flags & RSOC_ACD_CTDP
1477 			    && dlen >= (int)sizeof(struct scsi_repsupopcode_timeouts_descriptor)) {
1478 				struct scsi_repsupopcode_timeouts_descriptor *td
1479 					= (struct scsi_repsupopcode_timeouts_descriptor *)c;
1480 				long nomto = _4btol(td->nom_process_timeout);
1481 				long cmdto = _4btol(td->cmd_process_timeout);
1482 				long t = (cmdto > nomto) ? cmdto : nomto;
1483 
1484 				dlen -= sizeof(struct scsi_repsupopcode_timeouts_descriptor);
1485 				c += sizeof(struct scsi_repsupopcode_timeouts_descriptor);
1486 
1487 				SC_DEBUGN(periph, SCSIPI_DB3,
1488 					  ("0x%02x %10ld %10ld",
1489 					   td->cmd_specific,
1490 					   nomto, cmdto));
1491 
1492 				if (t > tot->opcode_info[acd->opcode].ti_timeout) {
1493 					tot->opcode_info[acd->opcode].ti_timeout = t;
1494 					++count;
1495 				}
1496 			}
1497 			SC_DEBUGN(periph, SCSIPI_DB3, ("\n"));
1498 		}
1499 
1500 		if (count > 0) {
1501 			periph->periph_opcs = tot;
1502 		} else {
1503 			free(tot, M_DEVBUF);
1504 			SC_DEBUG(periph, SCSIPI_DB3,
1505 			 	("no usable timeout values available\n"));
1506 		}
1507 	} else {
1508 		SC_DEBUG(periph, SCSIPI_DB3,
1509 			 ("SCSI_MAINTENANCE_IN"
1510 			  "[RSOC_REPORT_SUPPORTED_OPCODES] failed error=%d"
1511 			  " - no device provided timeout "
1512 			  "values available\n", rc));
1513 	}
1514 
1515 	free(data, M_DEVBUF);
1516 }
1517 
1518 /*
1519  * scsipi_update_timeouts:
1520  *	Override the timeout value if device/config provided
1521  *	timeouts are available.
1522  */
1523 static void
1524 scsipi_update_timeouts(struct scsipi_xfer *xs)
1525 {
1526 	struct scsipi_opcodes *opcs;
1527 	u_int8_t cmd;
1528 	int timeout;
1529 	struct scsipi_opinfo *oi;
1530 
1531 	if (xs->timeout <= 0) {
1532 		return;
1533 	}
1534 
1535 	opcs = xs->xs_periph->periph_opcs;
1536 
1537 	if (opcs == NULL) {
1538 		return;
1539 	}
1540 
1541 	cmd = xs->cmd->opcode;
1542 	oi = &opcs->opcode_info[cmd];
1543 
1544 	timeout = 1000 * (int)oi->ti_timeout;
1545 
1547 	if (timeout > xs->timeout && timeout < 86400000) {
1548 		/*
1549 		 * pick up device configured timeouts if they
1550 		 * are longer than the requested ones but less
1551 		 * than a day
1552 		 */
1553 #ifdef SCSIPI_DEBUG
1554 		if ((oi->ti_flags & SCSIPI_TI_LOGGED) == 0) {
1555 			SC_DEBUG(xs->xs_periph, SCSIPI_DB3,
1556 				 ("Overriding command 0x%02x "
1557 				  "timeout of %d with %d ms\n",
1558 				  cmd, xs->timeout, timeout));
1559 			oi->ti_flags |= SCSIPI_TI_LOGGED;
1560 		}
1561 #endif
1562 		xs->timeout = timeout;
1563 	}
1564 }
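
/*
 * Editor's worked example: ti_timeout holds seconds (as reported via
 * REPORT SUPPORTED OPERATION CODES), so a device reporting a 30
 * second command timeout yields timeout = 1000 * 30 = 30000 ms.
 * That overrides a caller-supplied 10000 ms, but not one of
 * 60000 ms, and never anything of a day or more (86400000 ms).
 */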
1565 
1566 /*
1567  * scsipi_free_opcodeinfo:
1568  *
1569  * free the opcode information table
1570  */
1571 void
1572 scsipi_free_opcodeinfo(struct scsipi_periph *periph)
1573 {
1574 	if (periph->periph_opcs != NULL) {
1575 		free(periph->periph_opcs, M_DEVBUF);
1576 	}
1577 
1578 	periph->periph_opcs = NULL;
1579 }
1580 
1581 /*
1582  * scsipi_done:
1583  *
1584  *	This routine is called by an adapter's interrupt handler when
1585  *	an xfer is completed.
1586  */
1587 void
1588 scsipi_done(struct scsipi_xfer *xs)
1589 {
1590 	struct scsipi_periph *periph = xs->xs_periph;
1591 	struct scsipi_channel *chan = periph->periph_channel;
1592 	int freezecnt;
1593 
1594 	SC_DEBUG(periph, SCSIPI_DB2, ("scsipi_done\n"));
1595 #ifdef SCSIPI_DEBUG
1596 	if (periph->periph_dbflags & SCSIPI_DB1)
1597 		show_scsipi_cmd(xs);
1598 #endif
1599 
1600 	mutex_enter(chan_mtx(chan));
1601 	SDT_PROBE1(scsi, base, xfer, done,  xs);
1602 	/*
1603 	 * The resource this command was using is now free.
1604 	 */
1605 	if (xs->xs_status & XS_STS_DONE) {
1606 		/* XXX in certain circumstances, such as a device
1607 		 * being detached, an xs that has already been
1608 		 * scsipi_done()'d by the main thread will be done'd
1609 		 * again by scsibusdetach(). Putting the xs on the
1610 		 * chan_complete queue causes list corruption and
1611 		 * everyone dies. This prevents that, but perhaps
1612 		 * there should be better coordination somewhere such
1613 		 * that this won't ever happen (and can be turned into
1614 		 * a KASSERT()).
1615 		 */
1616 		SDT_PROBE1(scsi, base, xfer, redone,  xs);
1617 		mutex_exit(chan_mtx(chan));
1618 		goto out;
1619 	}
1620 	scsipi_put_resource(chan);
1621 	xs->xs_periph->periph_sent--;
1622 
1623 	/*
1624 	 * If the command was tagged, free the tag.
1625 	 */
1626 	if (XS_CTL_TAGTYPE(xs) != 0)
1627 		scsipi_put_tag(xs);
1628 	else
1629 		periph->periph_flags &= ~PERIPH_UNTAG;
1630 
1631 	/* Mark the command as `done'. */
1632 	xs->xs_status |= XS_STS_DONE;
1633 
1634 #ifdef DIAGNOSTIC
1635 	if ((xs->xs_control & (XS_CTL_ASYNC|XS_CTL_POLL)) ==
1636 	    (XS_CTL_ASYNC|XS_CTL_POLL))
1637 		panic("scsipi_done: ASYNC and POLL");
1638 #endif
1639 
1640 	/*
1641 	 * If the xfer had an error of any sort, freeze the
1642 	 * periph's queue.  Freeze it again if we were requested
1643 	 * to do so in the xfer.
1644 	 */
1645 	freezecnt = 0;
1646 	if (xs->error != XS_NOERROR)
1647 		freezecnt++;
1648 	if (xs->xs_control & XS_CTL_FREEZE_PERIPH)
1649 		freezecnt++;
1650 	if (freezecnt != 0)
1651 		scsipi_periph_freeze_locked(periph, freezecnt);
1652 
1653 	/*
1654 	 * record the xfer with a pending sense, in case a SCSI reset is
1655 	 * received before the thread is waked up.
1656 	 */
1657 	if (xs->error == XS_BUSY && xs->status == SCSI_CHECK) {
1658 		periph->periph_flags |= PERIPH_SENSE;
1659 		periph->periph_xscheck = xs;
1660 	}
1661 
1662 	/*
1663 	 * If this was an xfer that was not to complete asynchronously,
1664 	 * let the requesting thread perform error checking/handling
1665 	 * in its context.
1666 	 */
1667 	if ((xs->xs_control & XS_CTL_ASYNC) == 0) {
1668 		/*
1669 		 * If it's a polling job, just return, to unwind the
1670 		 * call graph.  We don't need to restart the queue,
1671 		 * because polling jobs are treated specially, and
1672 		 * are really only used during crash dumps anyway
1673 		 * (XXX or during boot-time autoconfiguration of
1674 		 * ATAPI devices).
1675 		 */
1676 		if (xs->xs_control & XS_CTL_POLL) {
1677 			mutex_exit(chan_mtx(chan));
1678 			return;
1679 		}
1680 		cv_broadcast(xs_cv(xs));
1681 		mutex_exit(chan_mtx(chan));
1682 		goto out;
1683 	}
1684 
1685 	/*
1686 	 * Catch the extremely common case of I/O completing
1687 	 * without error; no use in taking a context switch
1688 	 * if we can handle it in interrupt context.
1689 	 */
1690 	if (xs->error == XS_NOERROR) {
1691 		mutex_exit(chan_mtx(chan));
1692 		(void) scsipi_complete(xs);
1693 		goto out;
1694 	}
1695 
1696 	/*
1697 	 * There is an error on this xfer.  Put it on the channel's
1698 	 * completion queue, and wake up the completion thread.
1699 	 */
1700 	TAILQ_INSERT_TAIL(&chan->chan_complete, xs, channel_q);
1701 	cv_broadcast(chan_cv_complete(chan));
1702 	mutex_exit(chan_mtx(chan));
1703 
1704  out:
1705 	/*
1706 	 * If there are more xfers on the channel's queue, attempt to
1707 	 * run them.
1708 	 */
1709 	scsipi_run_queue(chan);
1710 }
1711 
1712 /*
1713  * scsipi_complete:
1714  *
1715  *	Completion of a scsipi_xfer.  This is the guts of scsipi_done().
1716  *
1717  *	NOTE: This routine MUST be called with valid thread context
1718  *	except for the case where the following two conditions are
1719  *	true:
1720  *
1721  *		xs->error == XS_NOERROR
1722  *		XS_CTL_ASYNC is set in xs->xs_control
1723  *
1724  *	The semantics of this routine can be tricky, so here is an
1725  *	explanation:
1726  *
1727  *		0		Xfer completed successfully.
1728  *
1729  *		ERESTART	Xfer had an error, but was restarted.
1730  *
1731  *		anything else	Xfer had an error, return value is Unix
1732  *				errno.
1733  *
1734  *	If the return value is anything but ERESTART:
1735  *
1736  *		- If XS_CTL_ASYNC is set, `xs' has been freed back to
1737  *		  the pool.
1738  *		- If there is a buf associated with the xfer,
1739  *		  it has been biodone()'d.
1740  */
1741 static int
1742 scsipi_complete(struct scsipi_xfer *xs)
1743 {
1744 	struct scsipi_periph *periph = xs->xs_periph;
1745 	struct scsipi_channel *chan = periph->periph_channel;
1746 	int error;
1747 
1748 	SDT_PROBE1(scsi, base, xfer, complete,  xs);
1749 
1750 #ifdef DIAGNOSTIC
1751 	if ((xs->xs_control & XS_CTL_ASYNC) != 0 && xs->bp == NULL)
1752 		panic("scsipi_complete: XS_CTL_ASYNC but no buf");
1753 #endif
1754 	/*
1755 	 * If command terminated with a CHECK CONDITION, we need to issue a
1756 	 * REQUEST_SENSE command. Once the REQUEST_SENSE has been processed
1757 	 * we'll have the real status.
1758 	 * Must be processed with channel lock held to avoid missing
1759 	 * a SCSI bus reset for this command.
1760 	 */
1761 	mutex_enter(chan_mtx(chan));
1762 	if (xs->error == XS_BUSY && xs->status == SCSI_CHECK) {
1763 		/* request sense for a request sense? */
1764 		if (xs->xs_control & XS_CTL_REQSENSE) {
1765 			scsipi_printaddr(periph);
1766 			printf("request sense for a request sense?\n");
1767 			/* XXX maybe we should reset the device ? */
1768 			/* we've been frozen because xs->error != XS_NOERROR */
1769 			scsipi_periph_thaw_locked(periph, 1);
1770 			mutex_exit(chan_mtx(chan));
1771 			if (xs->resid < xs->datalen) {
1772 				printf("we read %d bytes of sense anyway:\n",
1773 				    xs->datalen - xs->resid);
1774 				scsipi_print_sense_data((void *)xs->data, 0);
1775 			}
1776 			return EINVAL;
1777 		}
1778 		mutex_exit(chan_mtx(chan)); // XXX allows other commands to queue or run
1779 		scsipi_request_sense(xs);
1780 	} else
1781 		mutex_exit(chan_mtx(chan));
1782 
1783 	/*
1784 	 * If it's a user level request, bypass all usual completion
1785 	 * processing, let the user work it out..
1786 	 */
1787 	if ((xs->xs_control & XS_CTL_USERCMD) != 0) {
1788 		SC_DEBUG(periph, SCSIPI_DB3, ("calling user done()\n"));
1789 		mutex_enter(chan_mtx(chan));
1790 		if (xs->error != XS_NOERROR)
1791 			scsipi_periph_thaw_locked(periph, 1);
1792 		mutex_exit(chan_mtx(chan));
1793 		scsipi_user_done(xs);
1794 		SC_DEBUG(periph, SCSIPI_DB3, ("returned from user done()\n "));
1795 		return 0;
1796 	}
1797 
1798 	switch (xs->error) {
1799 	case XS_NOERROR:
1800 		error = 0;
1801 		break;
1802 
1803 	case XS_SENSE:
1804 	case XS_SHORTSENSE:
1805 		error = (*chan->chan_bustype->bustype_interpret_sense)(xs);
1806 		break;
1807 
1808 	case XS_RESOURCE_SHORTAGE:
1809 		/*
1810 		 * XXX Should freeze channel's queue.
1811 		 */
1812 		scsipi_printaddr(periph);
1813 		printf("adapter resource shortage\n");
1814 		/* FALLTHROUGH */
1815 
1816 	case XS_BUSY:
1817 		if (xs->error == XS_BUSY && xs->status == SCSI_QUEUE_FULL) {
1818 			struct scsipi_max_openings mo;
1819 
1820 			/*
1821 			 * We set the openings to active - 1, assuming that
1822 			 * the command that got us here is the first one that
1823 			 * can't fit into the device's queue.  If that's not
1824 			 * the case, I guess we'll find out soon enough.
1825 			 */
1826 			mo.mo_target = periph->periph_target;
1827 			mo.mo_lun = periph->periph_lun;
1828 			if (periph->periph_active < periph->periph_openings)
1829 				mo.mo_openings = periph->periph_active - 1;
1830 			else
1831 				mo.mo_openings = periph->periph_openings - 1;
1832 #ifdef DIAGNOSTIC
1833 			if (mo.mo_openings < 0) {
1834 				scsipi_printaddr(periph);
1835 				printf("QUEUE FULL resulted in < 0 openings\n");
1836 				panic("scsipi_done");
1837 			}
1838 #endif
1839 			if (mo.mo_openings == 0) {
1840 				scsipi_printaddr(periph);
1841 				printf("QUEUE FULL resulted in 0 openings\n");
1842 				mo.mo_openings = 1;
1843 			}
1844 			scsipi_async_event(chan, ASYNC_EVENT_MAX_OPENINGS, &mo);
1845 			error = ERESTART;
1846 		} else if (xs->xs_retries != 0) {
1847 			xs->xs_retries--;
1848 			/*
1849 			 * Wait one second, and try again.
1850 			 */
1851 			mutex_enter(chan_mtx(chan));
1852 			if ((xs->xs_control & XS_CTL_POLL) ||
1853 			    (chan->chan_flags & SCSIPI_CHAN_TACTIVE) == 0) {
1854 				/* XXX: quite extreme */
1855 				kpause("xsbusy", false, hz, chan_mtx(chan));
1856 			} else if (!callout_pending(&periph->periph_callout)) {
1857 				scsipi_periph_freeze_locked(periph, 1);
1858 				callout_reset(&periph->periph_callout,
1859 				    hz, scsipi_periph_timed_thaw, periph);
1860 			}
1861 			mutex_exit(chan_mtx(chan));
1862 			error = ERESTART;
1863 		} else
1864 			error = EBUSY;
1865 		break;
1866 
1867 	case XS_REQUEUE:
1868 		error = ERESTART;
1869 		break;
1870 
1871 	case XS_SELTIMEOUT:
1872 	case XS_TIMEOUT:
1873 		/*
1874 		 * If the device hasn't gone away, honor retry counts.
1875 		 *
1876 		 * Note that if we're in the middle of probing it,
1877 		 * it won't be found because it isn't here yet, so
1878 		 * we won't honor the retry count in that case.
1879 		 */
1880 		if (scsipi_lookup_periph(chan, periph->periph_target,
1881 		    periph->periph_lun) && xs->xs_retries != 0) {
1882 			xs->xs_retries--;
1883 			error = ERESTART;
1884 		} else
1885 			error = EIO;
1886 		break;
1887 
1888 	case XS_RESET:
1889 		if (xs->xs_control & XS_CTL_REQSENSE) {
1890 			/*
1891 			 * request sense interrupted by reset: signal it
1892 			 * with EINTR return code.
1893 			 */
1894 			error = EINTR;
1895 		} else {
1896 			if (xs->xs_retries != 0) {
1897 				xs->xs_retries--;
1898 				error = ERESTART;
1899 			} else
1900 				error = EIO;
1901 		}
1902 		break;
1903 
1904 	case XS_DRIVER_STUFFUP:
1905 		scsipi_printaddr(periph);
1906 		printf("generic HBA error\n");
1907 		error = EIO;
1908 		break;
1909 	default:
1910 		scsipi_printaddr(periph);
1911 		printf("invalid return code from adapter: %d\n", xs->error);
1912 		error = EIO;
1913 		break;
1914 	}
1915 
1916 	mutex_enter(chan_mtx(chan));
1917 	if (error == ERESTART) {
1918 		SDT_PROBE1(scsi, base, xfer, restart,  xs);
1919 		/*
1920 		 * If we get here, the periph has been thawed and frozen
1921 		 * again if we had to issue recovery commands.  Alternatively,
1922 		 * it may have been frozen again and in a timed thaw.  In
1923 		 * any case, we thaw the periph once we re-enqueue the
1924 		 * command.  Once the periph is fully thawed, it will begin
1925 		 * operation again.
1926 		 */
1927 		xs->error = XS_NOERROR;
1928 		xs->status = SCSI_OK;
1929 		xs->xs_status &= ~XS_STS_DONE;
1930 		xs->xs_requeuecnt++;
1931 		error = scsipi_enqueue(xs);
1932 		if (error == 0) {
1933 			scsipi_periph_thaw_locked(periph, 1);
1934 			mutex_exit(chan_mtx(chan));
1935 			return ERESTART;
1936 		}
1937 	}
1938 
1939 	/*
1940 	 * scsipi_done() freezes the queue if not XS_NOERROR.
1941 	 * Thaw it here.
1942 	 */
1943 	if (xs->error != XS_NOERROR)
1944 		scsipi_periph_thaw_locked(periph, 1);
1945 	mutex_exit(chan_mtx(chan));
1946 
1947 	if (periph->periph_switch->psw_done)
1948 		periph->periph_switch->psw_done(xs, error);
1949 
1950 	mutex_enter(chan_mtx(chan));
1951 	if (xs->xs_control & XS_CTL_ASYNC)
1952 		scsipi_put_xs(xs);
1953 	mutex_exit(chan_mtx(chan));
1954 
1955 	return error;
1956 }
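/*
 * Example (illustrative sketch, not taken from any particular driver):
 * the psw_done hook invoked above typically hands the buffer back to
 * the block layer.  The name "xxx_done" is hypothetical.
 *
 *	static void
 *	xxx_done(struct scsipi_xfer *xs, int error)
 *	{
 *		struct buf *bp = xs->bp;
 *
 *		if (bp != NULL) {
 *			bp->b_error = error;
 *			bp->b_resid = (error != 0) ? bp->b_bcount : xs->resid;
 *			biodone(bp);
 *		}
 *	}
 */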
1957 
1958 /*
1959  * Issue a request sense for the given scsipi_xfer. Called when the xfer
1960  * returns with a CHECK_CONDITION status. Must be called in valid thread
1961  * context.
1962  */
1963 
1964 static void
1965 scsipi_request_sense(struct scsipi_xfer *xs)
1966 {
1967 	struct scsipi_periph *periph = xs->xs_periph;
1968 	int flags, error;
1969 	struct scsi_request_sense cmd;
1970 
1971 	periph->periph_flags |= PERIPH_SENSE;
1972 
1973 	/* if command was polling, request sense will too */
1974 	flags = xs->xs_control & XS_CTL_POLL;
1975 	/* Polling commands can't sleep */
1976 	if (flags)
1977 		flags |= XS_CTL_NOSLEEP;
1978 
1979 	flags |= XS_CTL_REQSENSE | XS_CTL_URGENT | XS_CTL_DATA_IN |
1980 	    XS_CTL_THAW_PERIPH | XS_CTL_FREEZE_PERIPH;
1981 
1982 	memset(&cmd, 0, sizeof(cmd));
1983 	cmd.opcode = SCSI_REQUEST_SENSE;
1984 	cmd.length = sizeof(struct scsi_sense_data);
1985 
1986 	error = scsipi_command(periph, (void *)&cmd, sizeof(cmd),
1987 	    (void *)&xs->sense.scsi_sense, sizeof(struct scsi_sense_data),
1988 	    0, 1000, NULL, flags);
1989 	periph->periph_flags &= ~PERIPH_SENSE;
1990 	periph->periph_xscheck = NULL;
1991 	switch (error) {
1992 	case 0:
1993 		/* we have a valid sense */
1994 		xs->error = XS_SENSE;
1995 		return;
1996 	case EINTR:
1997 		/* REQUEST_SENSE interrupted by bus reset. */
1998 		xs->error = XS_RESET;
1999 		return;
2000 	case EIO:
2001 		/*
2002 		 * Request sense couldn't be performed.
2003 		 * XXX this isn't quite right, but we don't have anything
2004 		 * better for now.
2005 		 */
2006 		xs->error = XS_DRIVER_STUFFUP;
2007 		return;
2008 	default:
2009 		/* Notify that request sense failed. */
2010 		xs->error = XS_DRIVER_STUFFUP;
2011 		scsipi_printaddr(periph);
2012 		printf("request sense failed with error %d\n", error);
2013 		return;
2014 	}
2015 }
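/*
 * Example (hypothetical caller, for illustration): the same entry
 * point can be used by a periph driver to fetch sense data by hand.
 * Argument order follows the call above: cmd, cmdlen, data, datalen,
 * retries, timeout (ms), bp, flags.  The flag combination here is an
 * assumption, chosen to show a polled, no-sleep request.
 *
 *	struct scsi_request_sense cmd;
 *	struct scsi_sense_data sense;
 *	int error;
 *
 *	memset(&cmd, 0, sizeof(cmd));
 *	cmd.opcode = SCSI_REQUEST_SENSE;
 *	cmd.length = sizeof(sense);
 *	error = scsipi_command(periph, (void *)&cmd, sizeof(cmd),
 *	    (void *)&sense, sizeof(sense), 0, 1000, NULL,
 *	    XS_CTL_DATA_IN | XS_CTL_POLL | XS_CTL_NOSLEEP);
 */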
2016 
2017 /*
2018  * scsipi_enqueue:
2019  *
2020  *	Enqueue an xfer on a channel.
2021  */
2022 static int
2023 scsipi_enqueue(struct scsipi_xfer *xs)
2024 {
2025 	struct scsipi_channel *chan = xs->xs_periph->periph_channel;
2026 	struct scsipi_xfer *qxs;
2027 
2028 	SDT_PROBE1(scsi, base, xfer, enqueue,  xs);
2029 
2030 	/*
2031 	 * If the xfer is to be polled, and there are already jobs on
2032 	 * the queue, we can't proceed.
2033 	 */
2034 	KASSERT(mutex_owned(chan_mtx(chan)));
2035 	if ((xs->xs_control & XS_CTL_POLL) != 0 &&
2036 	    TAILQ_FIRST(&chan->chan_queue) != NULL) {
2037 		xs->error = XS_DRIVER_STUFFUP;
2038 		return EAGAIN;
2039 	}
2040 
2041 	/*
2042 	 * If we have an URGENT xfer, it's an error recovery command
2043 	 * and it should just go on the head of the channel's queue.
2044 	 */
2045 	if (xs->xs_control & XS_CTL_URGENT) {
2046 		TAILQ_INSERT_HEAD(&chan->chan_queue, xs, channel_q);
2047 		goto out;
2048 	}
2049 
2050 	/*
2051 	 * If this xfer has already been on the queue before, we
2052 	 * need to reinsert it in the correct order.  That order is:
2053 	 *
2054 	 *	Immediately before the first xfer for this periph
2055 	 *	with a requeuecnt less than xs->xs_requeuecnt.
2056 	 *
2057 	 * Failing that, at the end of the queue.  (We'll end up
2058 	 * there naturally.)
2059 	 */
2060 	if (xs->xs_requeuecnt != 0) {
2061 		for (qxs = TAILQ_FIRST(&chan->chan_queue); qxs != NULL;
2062 		     qxs = TAILQ_NEXT(qxs, channel_q)) {
2063 			if (qxs->xs_periph == xs->xs_periph &&
2064 			    qxs->xs_requeuecnt < xs->xs_requeuecnt)
2065 				break;
2066 		}
2067 		if (qxs != NULL) {
2068 			TAILQ_INSERT_AFTER(&chan->chan_queue, qxs, xs,
2069 			    channel_q);
2070 			goto out;
2071 		}
2072 	}
2073 	TAILQ_INSERT_TAIL(&chan->chan_queue, xs, channel_q);
2074  out:
2075 	if (xs->xs_control & XS_CTL_THAW_PERIPH)
2076 		scsipi_periph_thaw_locked(xs->xs_periph, 1);
2077 	return 0;
2078 }
2079 
2080 /*
2081  * scsipi_run_queue:
2082  *
2083  *	Start as many xfers as possible running on the channel.
2084  */
2085 static void
2086 scsipi_run_queue(struct scsipi_channel *chan)
2087 {
2088 	struct scsipi_xfer *xs;
2089 	struct scsipi_periph *periph;
2090 
2091 	SDT_PROBE1(scsi, base, queue, batch__start,  chan);
2092 	for (;;) {
2093 		mutex_enter(chan_mtx(chan));
2094 
2095 		/*
2096 		 * If the channel is frozen, we can't do any work right
2097 		 * now.
2098 		 */
2099 		if (chan->chan_qfreeze != 0) {
2100 			mutex_exit(chan_mtx(chan));
2101 			break;
2102 		}
2103 
2104 		/*
2105 		 * Look for work to do, and make sure we can do it.
2106 		 */
2107 		for (xs = TAILQ_FIRST(&chan->chan_queue); xs != NULL;
2108 		     xs = TAILQ_NEXT(xs, channel_q)) {
2109 			periph = xs->xs_periph;
2110 
2111 			if ((periph->periph_sent >= periph->periph_openings) ||
2112 			    periph->periph_qfreeze != 0 ||
2113 			    (periph->periph_flags & PERIPH_UNTAG) != 0)
2114 				continue;
2115 
2116 			if ((periph->periph_flags &
2117 			    (PERIPH_RECOVERING | PERIPH_SENSE)) != 0 &&
2118 			    (xs->xs_control & XS_CTL_URGENT) == 0)
2119 				continue;
2120 
2121 			/*
2122 			 * We can issue this xfer!
2123 			 */
2124 			goto got_one;
2125 		}
2126 
2127 		/*
2128 		 * Can't find any work to do right now.
2129 		 */
2130 		mutex_exit(chan_mtx(chan));
2131 		break;
2132 
2133  got_one:
2134 		/*
2135 		 * Have an xfer to run.  Allocate a resource from
2136 		 * the adapter to run it.  If we can't allocate that
2137 		 * resource, we don't dequeue the xfer.
2138 		 */
2139 		if (scsipi_get_resource(chan) == 0) {
2140 			/*
2141 			 * Adapter is out of resources.  If the adapter
2142 			 * supports it, attempt to grow them.
2143 			 */
2144 			if (scsipi_grow_resources(chan) == 0) {
2145 				/*
2146 				 * Wasn't able to grow resources,
2147 				 * nothing more we can do.
2148 				 */
2149 				if (xs->xs_control & XS_CTL_POLL) {
2150 					scsipi_printaddr(xs->xs_periph);
2151 					printf("polling command but no "
2152 					    "adapter resources\n");
2153 					/* We'll panic shortly... */
2154 				}
2155 				mutex_exit(chan_mtx(chan));
2156 
2157 				/*
2158 				 * XXX: We should be able to note here
2159 				 * XXX: that resources are needed!
2160 				 */
2161 				break;
2162 			}
2163 			/*
2164 			 * scsipi_grow_resources() allocated the resource
2165 			 * for us.
2166 			 */
2167 		}
2168 
2169 		/*
2170 		 * We have a resource to run this xfer, do it!
2171 		 */
2172 		TAILQ_REMOVE(&chan->chan_queue, xs, channel_q);
2173 
2174 		/*
2175 		 * If the command is to be tagged, allocate a tag ID
2176 		 * for it.
2177 		 */
2178 		if (XS_CTL_TAGTYPE(xs) != 0)
2179 			scsipi_get_tag(xs);
2180 		else
2181 			periph->periph_flags |= PERIPH_UNTAG;
2182 		periph->periph_sent++;
2183 		mutex_exit(chan_mtx(chan));
2184 
2185 		SDT_PROBE2(scsi, base, queue, run,  chan, xs);
2186 		scsipi_adapter_request(chan, ADAPTER_REQ_RUN_XFER, xs);
2187 	}
2188 	SDT_PROBE1(scsi, base, queue, batch__done,  chan);
2189 }
2190 
2191 /*
2192  * scsipi_execute_xs:
2193  *
2194  *	Begin execution of an xfer, waiting for it to complete, if necessary.
2195  */
2196 int
2197 scsipi_execute_xs(struct scsipi_xfer *xs)
2198 {
2199 	struct scsipi_periph *periph = xs->xs_periph;
2200 	struct scsipi_channel *chan = periph->periph_channel;
2201 	int oasync, async, poll, error;
2202 
2203 	KASSERT(!cold);
2204 
2205 	scsipi_update_timeouts(xs);
2206 
2207 	(chan->chan_bustype->bustype_cmd)(xs);
2208 
2209 	xs->xs_status &= ~XS_STS_DONE;
2210 	xs->error = XS_NOERROR;
2211 	xs->resid = xs->datalen;
2212 	xs->status = SCSI_OK;
2213 	SDT_PROBE1(scsi, base, xfer, execute,  xs);
2214 
2215 #ifdef SCSIPI_DEBUG
2216 	if (xs->xs_periph->periph_dbflags & SCSIPI_DB3) {
2217 		printf("scsipi_execute_xs: ");
2218 		show_scsipi_xs(xs);
2219 		printf("\n");
2220 	}
2221 #endif
2222 
2223 	/*
2224 	 * Deal with command tagging:
2225 	 *
2226 	 *	- If the device's current operating mode doesn't
2227 	 *	  include tagged queueing, clear the tag mask.
2228 	 *
2229 	 *	- If the device's current operating mode *does*
2230 	 *	  include tagged queueing, set the tag_type in
2231 	 *	  the xfer to the appropriate byte for the tag
2232 	 *	  message.
2233 	 */
2234 	if ((PERIPH_XFER_MODE(periph) & PERIPH_CAP_TQING) == 0 ||
2235 	    (xs->xs_control & XS_CTL_REQSENSE)) {
2236 		xs->xs_control &= ~XS_CTL_TAGMASK;
2237 		xs->xs_tag_type = 0;
2238 	} else {
2239 		/*
2240 		 * If the request doesn't specify a tag, give Head
2241 		 * tags to URGENT operations and Simple tags to
2242 		 * everything else.
2243 		 */
2244 		if (XS_CTL_TAGTYPE(xs) == 0) {
2245 			if (xs->xs_control & XS_CTL_URGENT)
2246 				xs->xs_control |= XS_CTL_HEAD_TAG;
2247 			else
2248 				xs->xs_control |= XS_CTL_SIMPLE_TAG;
2249 		}
2250 
2251 		switch (XS_CTL_TAGTYPE(xs)) {
2252 		case XS_CTL_ORDERED_TAG:
2253 			xs->xs_tag_type = MSG_ORDERED_Q_TAG;
2254 			break;
2255 
2256 		case XS_CTL_SIMPLE_TAG:
2257 			xs->xs_tag_type = MSG_SIMPLE_Q_TAG;
2258 			break;
2259 
2260 		case XS_CTL_HEAD_TAG:
2261 			xs->xs_tag_type = MSG_HEAD_OF_Q_TAG;
2262 			break;
2263 
2264 		default:
2265 			scsipi_printaddr(periph);
2266 			printf("invalid tag mask 0x%08x\n",
2267 			    XS_CTL_TAGTYPE(xs));
2268 			panic("scsipi_execute_xs");
2269 		}
2270 	}
2271 
2272 	/* If the adapter wants us to poll, poll. */
2273 	if (chan->chan_adapter->adapt_flags & SCSIPI_ADAPT_POLL_ONLY)
2274 		xs->xs_control |= XS_CTL_POLL;
2275 
2276 	/*
2277 	 * If we don't yet have a completion thread, or we are to poll for
2278 	 * completion, clear the ASYNC flag.
2279 	 */
2280 	oasync = (xs->xs_control & XS_CTL_ASYNC);
2281 	if (chan->chan_thread == NULL || (xs->xs_control & XS_CTL_POLL) != 0)
2282 		xs->xs_control &= ~XS_CTL_ASYNC;
2283 
2284 	async = (xs->xs_control & XS_CTL_ASYNC);
2285 	poll = (xs->xs_control & XS_CTL_POLL);
2286 
2287 #ifdef DIAGNOSTIC
2288 	if (oasync != 0 && xs->bp == NULL)
2289 		panic("scsipi_execute_xs: XS_CTL_ASYNC but no buf");
2290 #endif
2291 
2292 	/*
2293 	 * Enqueue the transfer.  If we're not polling for completion, this
2294 	 * should ALWAYS return `no error'.
2295 	 */
2296 	error = scsipi_enqueue(xs);
2297 	if (error) {
2298 		if (poll == 0) {
2299 			scsipi_printaddr(periph);
2300 			printf("not polling, but enqueue failed with %d\n",
2301 			    error);
2302 			panic("scsipi_execute_xs");
2303 		}
2304 
2305 		scsipi_printaddr(periph);
2306 		printf("should have flushed queue?\n");
2307 		goto free_xs;
2308 	}
2309 
2310 	mutex_exit(chan_mtx(chan));
2311  restarted:
2312 	scsipi_run_queue(chan);
2313 	mutex_enter(chan_mtx(chan));
2314 
2315 	/*
2316 	 * The xfer is enqueued, and possibly running.  If it's to be
2317 	 * completed asynchronously, just return now.
2318 	 */
2319 	if (async)
2320 		return 0;
2321 
2322 	/*
2323 	 * Not an asynchronous command; wait for it to complete.
2324 	 */
2325 	while ((xs->xs_status & XS_STS_DONE) == 0) {
2326 		if (poll) {
2327 			scsipi_printaddr(periph);
2328 			printf("polling command not done\n");
2329 			panic("scsipi_execute_xs");
2330 		}
2331 		cv_wait(xs_cv(xs), chan_mtx(chan));
2332 	}
2333 
2334 	/*
2335 	 * Command is complete.  scsipi_done() has awakened us to perform
2336 	 * the error handling.
2337 	 */
2338 	mutex_exit(chan_mtx(chan));
2339 	error = scsipi_complete(xs);
2340 	if (error == ERESTART)
2341 		goto restarted;
2342 
2343 	/*
2344 	 * If it was meant to run async and we cleared async ourselves,
2345 	 * don't return an error here; it has already been handled.
2346 	 */
2347 	if (oasync)
2348 		error = 0;
2349 	/*
2350 	 * Command completed successfully or fatal error occurred.  Fall
2351 	 * into....
2352 	 */
2353 	mutex_enter(chan_mtx(chan));
2354  free_xs:
2355 	scsipi_put_xs(xs);
2356 	mutex_exit(chan_mtx(chan));
2357 
2358 	/*
2359 	 * Kick the queue, keep it running in case it stopped for some
2360 	 * reason.
2361 	 */
2362 	scsipi_run_queue(chan);
2363 
2364 	mutex_enter(chan_mtx(chan));
2365 	return error;
2366 }
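/*
 * Example (hypothetical, sketch only): the two completion disciplines
 * seen from a caller.  An asynchronous submission must carry a struct
 * buf (see the DIAGNOSTIC check above) and completes later via the
 * channel's completion thread; the retry and timeout values below are
 * invented.
 *
 *	error = scsipi_command(periph, (void *)&cmd, sizeof(cmd),
 *	    (void *)bp->b_data, bp->b_bcount, 4, 60000, bp,
 *	    XS_CTL_ASYNC | XS_CTL_NOSLEEP | XS_CTL_DATA_IN);
 *
 * Without XS_CTL_ASYNC (and without XS_CTL_POLL), the caller sleeps
 * in the cv_wait() loop above until scsipi_done() sets XS_STS_DONE.
 */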
2367 
2368 /*
2369  * scsipi_completion_thread:
2370  *
2371  *	This is the completion thread.  We wait for errors on
2372  *	asynchronous xfers, and perform the error handling
2373  *	function, restarting the command, if necessary.
2374  */
2375 static void
2376 scsipi_completion_thread(void *arg)
2377 {
2378 	struct scsipi_channel *chan = arg;
2379 	struct scsipi_xfer *xs;
2380 
2381 	if (chan->chan_init_cb)
2382 		(*chan->chan_init_cb)(chan, chan->chan_init_cb_arg);
2383 
2384 	mutex_enter(chan_mtx(chan));
2385 	chan->chan_flags |= SCSIPI_CHAN_TACTIVE;
2386 	for (;;) {
2387 		xs = TAILQ_FIRST(&chan->chan_complete);
2388 		if (xs == NULL && chan->chan_tflags == 0) {
2389 			/* nothing to do; wait */
2390 			cv_wait(chan_cv_complete(chan), chan_mtx(chan));
2391 			continue;
2392 		}
2393 		if (chan->chan_tflags & SCSIPI_CHANT_CALLBACK) {
2394 			/* call chan_callback from thread context */
2395 			chan->chan_tflags &= ~SCSIPI_CHANT_CALLBACK;
2396 			chan->chan_callback(chan, chan->chan_callback_arg);
2397 			continue;
2398 		}
2399 		if (chan->chan_tflags & SCSIPI_CHANT_GROWRES) {
2400 			/* attempt to get more openings for this channel */
2401 			chan->chan_tflags &= ~SCSIPI_CHANT_GROWRES;
2402 			mutex_exit(chan_mtx(chan));
2403 			scsipi_adapter_request(chan,
2404 			    ADAPTER_REQ_GROW_RESOURCES, NULL);
2405 			scsipi_channel_thaw(chan, 1);
2406 			if (chan->chan_tflags & SCSIPI_CHANT_GROWRES)
2407 				kpause("scsizzz", false, hz/10, NULL);
2408 			mutex_enter(chan_mtx(chan));
2409 			continue;
2410 		}
2411 		if (chan->chan_tflags & SCSIPI_CHANT_KICK) {
2412 			/* explicitly run the queues for this channel */
2413 			chan->chan_tflags &= ~SCSIPI_CHANT_KICK;
2414 			mutex_exit(chan_mtx(chan));
2415 			scsipi_run_queue(chan);
2416 			mutex_enter(chan_mtx(chan));
2417 			continue;
2418 		}
2419 		if (chan->chan_tflags & SCSIPI_CHANT_SHUTDOWN) {
2420 			break;
2421 		}
2422 		if (xs) {
2423 			TAILQ_REMOVE(&chan->chan_complete, xs, channel_q);
2424 			mutex_exit(chan_mtx(chan));
2425 
2426 			/*
2427 			 * Have an xfer with an error; process it.
2428 			 */
2429 			(void) scsipi_complete(xs);
2430 
2431 			/*
2432 			 * Kick the queue; keep it running if it was stopped
2433 			 * for some reason.
2434 			 */
2435 			scsipi_run_queue(chan);
2436 			mutex_enter(chan_mtx(chan));
2437 		}
2438 	}
2439 
2440 	chan->chan_thread = NULL;
2441 
2442 	/* In case parent is waiting for us to exit. */
2443 	cv_broadcast(chan_cv_thread(chan));
2444 	mutex_exit(chan_mtx(chan));
2445 
2446 	kthread_exit(0);
2447 }
2448 /*
2449  * scsipi_thread_call_callback:
2450  *
2451  *	Request that a callback be invoked from the completion thread.
2452  */
2453 int
2454 scsipi_thread_call_callback(struct scsipi_channel *chan,
2455     void (*callback)(struct scsipi_channel *, void *), void *arg)
2456 {
2457 
2458 	mutex_enter(chan_mtx(chan));
2459 	if ((chan->chan_flags & SCSIPI_CHAN_TACTIVE) == 0) {
2460 		/* kernel thread doesn't exist yet */
2461 		mutex_exit(chan_mtx(chan));
2462 		return ESRCH;
2463 	}
2464 	if (chan->chan_tflags & SCSIPI_CHANT_CALLBACK) {
2465 		mutex_exit(chan_mtx(chan));
2466 		return EBUSY;
2467 	}
2468 	scsipi_channel_freeze(chan, 1);
2469 	chan->chan_callback = callback;
2470 	chan->chan_callback_arg = arg;
2471 	chan->chan_tflags |= SCSIPI_CHANT_CALLBACK;
2472 	cv_broadcast(chan_cv_complete(chan));
2473 	mutex_exit(chan_mtx(chan));
2474 	return 0;
2475 }
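/*
 * Example (hypothetical): deferring work into the completion thread.
 * "xxx_chan_reload" and "sc" are invented names.  Note the freeze
 * above: the channel is frozen by one before the request is queued,
 * so the callback is expected to issue the matching
 * scsipi_channel_thaw().
 *
 *	static void
 *	xxx_chan_reload(struct scsipi_channel *chan, void *arg)
 *	{
 *		struct xxx_softc *sc = arg;
 *
 *		... rework sc in thread context ...
 *		scsipi_channel_thaw(chan, 1);
 *	}
 *
 *	error = scsipi_thread_call_callback(chan, xxx_chan_reload, sc);
 */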
2476 
2477 /*
2478  * scsipi_async_event:
2479  *
2480  *	Handle an asynchronous event from an adapter.
2481  */
2482 void
2483 scsipi_async_event(struct scsipi_channel *chan, scsipi_async_event_t event,
2484     void *arg)
2485 {
2486 	bool lock = chan_running(chan) > 0;
2487 
2488 	if (lock)
2489 		mutex_enter(chan_mtx(chan));
2490 	switch (event) {
2491 	case ASYNC_EVENT_MAX_OPENINGS:
2492 		scsipi_async_event_max_openings(chan,
2493 		    (struct scsipi_max_openings *)arg);
2494 		break;
2495 
2496 	case ASYNC_EVENT_XFER_MODE:
2497 		if (chan->chan_bustype->bustype_async_event_xfer_mode) {
2498 			chan->chan_bustype->bustype_async_event_xfer_mode(
2499 			    chan, arg);
2500 		}
2501 		break;
2502 	case ASYNC_EVENT_RESET:
2503 		scsipi_async_event_channel_reset(chan);
2504 		break;
2505 	}
2506 	if (lock)
2507 		mutex_exit(chan_mtx(chan));
2508 }
2509 
2510 /*
2511  * scsipi_async_event_max_openings:
2512  *
2513  *	Update the maximum number of outstanding commands a
2514  *	device may have.
2515  */
2516 static void
2517 scsipi_async_event_max_openings(struct scsipi_channel *chan,
2518     struct scsipi_max_openings *mo)
2519 {
2520 	struct scsipi_periph *periph;
2521 	int minlun, maxlun;
2522 
2523 	if (mo->mo_lun == -1) {
2524 		/*
2525 		 * Wildcarded; apply it to all LUNs.
2526 		 */
2527 		minlun = 0;
2528 		maxlun = chan->chan_nluns - 1;
2529 	} else
2530 		minlun = maxlun = mo->mo_lun;
2531 
2532 	/* XXX This could really suck with a large LUN space. */
2533 	for (; minlun <= maxlun; minlun++) {
2534 		periph = scsipi_lookup_periph_locked(chan, mo->mo_target, minlun);
2535 		if (periph == NULL)
2536 			continue;
2537 
2538 		if (mo->mo_openings < periph->periph_openings)
2539 			periph->periph_openings = mo->mo_openings;
2540 		else if (mo->mo_openings > periph->periph_openings &&
2541 		    (periph->periph_flags & PERIPH_GROW_OPENINGS) != 0)
2542 			periph->periph_openings = mo->mo_openings;
2543 	}
2544 }
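/*
 * Example (illustrative): an adapter reporting a new queue depth,
 * mirroring the QUEUE FULL handling in scsipi_complete() above.
 * A mo_lun of -1 wildcards all LUNs of the target; the depth of 4
 * is an invented value.
 *
 *	struct scsipi_max_openings mo;
 *
 *	mo.mo_target = target;
 *	mo.mo_lun = -1;
 *	mo.mo_openings = 4;
 *	scsipi_async_event(chan, ASYNC_EVENT_MAX_OPENINGS, &mo);
 */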
2545 
2546 /*
2547  * scsipi_set_xfer_mode:
2548  *
2549  *	Set the xfer mode for the specified I_T Nexus.
2550  */
2551 void
2552 scsipi_set_xfer_mode(struct scsipi_channel *chan, int target, int immed)
2553 {
2554 	struct scsipi_xfer_mode xm;
2555 	struct scsipi_periph *itperiph;
2556 	int lun;
2557 
2558 	/*
2559 	 * Go to the minimal xfer mode.
2560 	 */
2561 	xm.xm_target = target;
2562 	xm.xm_mode = 0;
2563 	xm.xm_period = 0;			/* ignored */
2564 	xm.xm_offset = 0;			/* ignored */
2565 
2566 	/*
2567 	 * Find the first LUN we know about on this I_T Nexus.
2568 	 */
2569 	for (itperiph = NULL, lun = 0; lun < chan->chan_nluns; lun++) {
2570 		itperiph = scsipi_lookup_periph(chan, target, lun);
2571 		if (itperiph != NULL)
2572 			break;
2573 	}
2574 	if (itperiph != NULL) {
2575 		xm.xm_mode = itperiph->periph_cap;
2576 		/*
2577 		 * Now issue the request to the adapter.
2578 		 */
2579 		scsipi_adapter_request(chan, ADAPTER_REQ_SET_XFER_MODE, &xm);
2580 		/*
2581 		 * If we want this to happen immediately, issue a dummy
2582 		 * command, since most adapters can't really negotiate unless
2583 		 * they're executing a job.
2584 		 */
2585 		if (immed != 0) {
2586 			(void) scsipi_test_unit_ready(itperiph,
2587 			    XS_CTL_DISCOVERY | XS_CTL_IGNORE_ILLEGAL_REQUEST |
2588 			    XS_CTL_IGNORE_NOT_READY |
2589 			    XS_CTL_IGNORE_MEDIA_CHANGE);
2590 		}
2591 	}
2592 }
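/*
 * Example (illustrative): a driver that wants renegotiation to happen
 * right away, rather than on the next regular command, passes a
 * non-zero "immed" so the dummy TEST UNIT READY above is issued:
 *
 *	scsipi_set_xfer_mode(chan, periph->periph_target, 1);
 */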
2593 
2594 /*
2595  * scsipi_async_event_channel_reset:
2596  *
2597  *	Handle a SCSI bus reset.
2598  *	Called with the channel lock held.
2599  */
2600 static void
2601 scsipi_async_event_channel_reset(struct scsipi_channel *chan)
2602 {
2603 	struct scsipi_xfer *xs, *xs_next;
2604 	struct scsipi_periph *periph;
2605 	int target, lun;
2606 
2607 	/*
2608 	 * Channel has been reset. Also mark pending REQUEST_SENSE
2609 	 * commands as reset, since their sense data is no longer available.
2610 	 * We can't call scsipi_done() from here, as the command has not
2611 	 * been sent to the adapter yet (that would corrupt the accounting).
2612 	 */
2613 
2614 	for (xs = TAILQ_FIRST(&chan->chan_queue); xs != NULL; xs = xs_next) {
2615 		xs_next = TAILQ_NEXT(xs, channel_q);
2616 		if (xs->xs_control & XS_CTL_REQSENSE) {
2617 			TAILQ_REMOVE(&chan->chan_queue, xs, channel_q);
2618 			xs->error = XS_RESET;
2619 			if ((xs->xs_control & XS_CTL_ASYNC) != 0)
2620 				TAILQ_INSERT_TAIL(&chan->chan_complete, xs,
2621 				    channel_q);
2622 		}
2623 	}
2624 	cv_broadcast(chan_cv_complete(chan));
2625 	/* Catch xs with pending sense which may not have a REQSENSE xs yet */
2626 	for (target = 0; target < chan->chan_ntargets; target++) {
2627 		if (target == chan->chan_id)
2628 			continue;
2629 		for (lun = 0; lun < chan->chan_nluns; lun++) {
2630 			periph = scsipi_lookup_periph_locked(chan, target, lun);
2631 			if (periph) {
2632 				xs = periph->periph_xscheck;
2633 				if (xs)
2634 					xs->error = XS_RESET;
2635 			}
2636 		}
2637 	}
2638 }
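/*
 * Example (illustrative): an HBA that observes a bus reset in its
 * interrupt handler reports it through the async event entry point;
 * the reset event carries no argument:
 *
 *	scsipi_async_event(chan, ASYNC_EVENT_RESET, NULL);
 */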
2639 
2640 /*
2641  * scsipi_target_detach:
2642  *
2643  *	Detach all periphs associated with an I_T nexus.
2644  *	Must be called from valid thread context.
2645  */
2646 int
2647 scsipi_target_detach(struct scsipi_channel *chan, int target, int lun,
2648     int flags)
2649 {
2650 	struct scsipi_periph *periph;
2651 	device_t tdev;
2652 	int ctarget, mintarget, maxtarget;
2653 	int clun, minlun, maxlun;
2654 	int error = 0;
2655 
2656 	if (target == -1) {
2657 		mintarget = 0;
2658 		maxtarget = chan->chan_ntargets;
2659 	} else {
2660 		if (target == chan->chan_id)
2661 			return EINVAL;
2662 		if (target < 0 || target >= chan->chan_ntargets)
2663 			return EINVAL;
2664 		mintarget = target;
2665 		maxtarget = target + 1;
2666 	}
2667 
2668 	if (lun == -1) {
2669 		minlun = 0;
2670 		maxlun = chan->chan_nluns;
2671 	} else {
2672 		if (lun < 0 || lun >= chan->chan_nluns)
2673 			return EINVAL;
2674 		minlun = lun;
2675 		maxlun = lun + 1;
2676 	}
2677 
2678 	/* for config_detach */
2679 	KERNEL_LOCK(1, curlwp);
2680 
2681 	mutex_enter(chan_mtx(chan));
2682 	for (ctarget = mintarget; ctarget < maxtarget; ctarget++) {
2683 		if (ctarget == chan->chan_id)
2684 			continue;
2685 
2686 		for (clun = minlun; clun < maxlun; clun++) {
2687 			periph = scsipi_lookup_periph_locked(chan, ctarget, clun);
2688 			if (periph == NULL)
2689 				continue;
2690 			tdev = periph->periph_dev;
2691 			mutex_exit(chan_mtx(chan));
2692 			error = config_detach(tdev, flags);
2693 			if (error)
2694 				goto out;
2695 			mutex_enter(chan_mtx(chan));
2696 			KASSERT(scsipi_lookup_periph_locked(chan, ctarget, clun) == NULL);
2697 		}
2698 	}
2699 	mutex_exit(chan_mtx(chan));
2700 
2701 out:
2702 	KERNEL_UNLOCK_ONE(curlwp);
2703 
2704 	return error;
2705 }
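/*
 * Example (illustrative): an adapter's detach path tearing down every
 * target and LUN on its channel, using the wildcard arguments handled
 * above (DETACH_FORCE is the usual config_detach() flag):
 *
 *	error = scsipi_target_detach(chan, -1, -1, DETACH_FORCE);
 */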
2706 
2707 /*
2708  * scsipi_adapter_addref:
2709  *
2710  *	Add a reference to the adapter pointed to by the provided
2711  *	link, enabling the adapter if necessary.
2712  */
2713 int
2714 scsipi_adapter_addref(struct scsipi_adapter *adapt)
2715 {
2716 	int error = 0;
2717 
2718 	if (atomic_inc_uint_nv(&adapt->adapt_refcnt) == 1
2719 	    && adapt->adapt_enable != NULL) {
2720 		scsipi_adapter_lock(adapt);
2721 		error = scsipi_adapter_enable(adapt, 1);
2722 		scsipi_adapter_unlock(adapt);
2723 		if (error)
2724 			atomic_dec_uint(&adapt->adapt_refcnt);
2725 	}
2726 	return error;
2727 }
2728 
2729 /*
2730  * scsipi_adapter_delref:
2731  *
2732  *	Delete a reference to the adapter pointed to by the provided
2733  *	link, disabling the adapter if possible.
2734  */
2735 void
2736 scsipi_adapter_delref(struct scsipi_adapter *adapt)
2737 {
2738 
2739 	if (atomic_dec_uint_nv(&adapt->adapt_refcnt) == 0
2740 	    && adapt->adapt_enable != NULL) {
2741 		scsipi_adapter_lock(adapt);
2742 		(void) scsipi_adapter_enable(adapt, 0);
2743 		scsipi_adapter_unlock(adapt);
2744 	}
2745 }
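/*
 * Example (illustrative): open/close paths typically bracket device
 * use with an addref/delref pair, so adapters that implement
 * adapt_enable are powered up on first use and down on last release:
 *
 *	if ((error = scsipi_adapter_addref(adapt)) != 0)
 *		return error;
 *	... issue commands ...
 *	scsipi_adapter_delref(adapt);
 */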
2746 
2747 static struct scsipi_syncparam {
2748 	int	ss_factor;
2749 	int	ss_period;	/* ns * 100 */
2750 } scsipi_syncparams[] = {
2751 	{ 0x08,		 625 },	/* FAST-160 (Ultra320) */
2752 	{ 0x09,		1250 },	/* FAST-80 (Ultra160) */
2753 	{ 0x0a,		2500 },	/* FAST-40 40MHz (Ultra2) */
2754 	{ 0x0b,		3030 },	/* FAST-40 33MHz (Ultra2) */
2755 	{ 0x0c,		5000 },	/* FAST-20 (Ultra) */
2756 };
2757 static const int scsipi_nsyncparams =
2758     sizeof(scsipi_syncparams) / sizeof(scsipi_syncparams[0]);
2759 
2760 int
2761 scsipi_sync_period_to_factor(int period /* ns * 100 */)
2762 {
2763 	int i;
2764 
2765 	for (i = 0; i < scsipi_nsyncparams; i++) {
2766 		if (period <= scsipi_syncparams[i].ss_period)
2767 			return scsipi_syncparams[i].ss_factor;
2768 	}
2769 
2770 	return (period / 100) / 4;
2771 }
2772 
2773 int
2774 scsipi_sync_factor_to_period(int factor)
2775 {
2776 	int i;
2777 
2778 	for (i = 0; i < scsipi_nsyncparams; i++) {
2779 		if (factor == scsipi_syncparams[i].ss_factor)
2780 			return scsipi_syncparams[i].ss_period;
2781 	}
2782 
2783 	return (factor * 4) * 100;
2784 }
2785 
2786 int
2787 scsipi_sync_factor_to_freq(int factor)
2788 {
2789 	int i;
2790 
2791 	for (i = 0; i < scsipi_nsyncparams; i++) {
2792 		if (factor == scsipi_syncparams[i].ss_factor)
2793 			return 100000000 / scsipi_syncparams[i].ss_period;
2794 	}
2795 
2796 	return 10000000 / ((factor * 4) * 10);
2797 }
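/*
 * Worked example: factor 0x0c (FAST-20) is in the table with
 * ss_period 5000, i.e. 50.00 ns, so scsipi_sync_factor_to_freq(0x0c)
 * returns 100000000 / 5000 = 20000 (kHz) = 20 MHz.  A factor outside
 * the table, say 0x19 (25), takes the generic formulas:
 * period = 25 * 4 * 100 = 10000 (100 ns) and
 * freq = 10000000 / (25 * 4 * 10) = 10000 kHz = 10 MHz.
 */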
2798 
2799 static inline void
2800 scsipi_adapter_lock(struct scsipi_adapter *adapt)
2801 {
2802 
2803 	if ((adapt->adapt_flags & SCSIPI_ADAPT_MPSAFE) == 0)
2804 		KERNEL_LOCK(1, NULL);
2805 }
2806 
2807 static inline void
2808 scsipi_adapter_unlock(struct scsipi_adapter *adapt)
2809 {
2810 
2811 	if ((adapt->adapt_flags & SCSIPI_ADAPT_MPSAFE) == 0)
2812 		KERNEL_UNLOCK_ONE(NULL);
2813 }
2814 
2815 void
2816 scsipi_adapter_minphys(struct scsipi_channel *chan, struct buf *bp)
2817 {
2818 	struct scsipi_adapter *adapt = chan->chan_adapter;
2819 
2820 	scsipi_adapter_lock(adapt);
2821 	(adapt->adapt_minphys)(bp);
2822 	scsipi_adapter_unlock(chan->chan_adapter);
2823 }
2824 
2825 void
2826 scsipi_adapter_request(struct scsipi_channel *chan,
2827 	scsipi_adapter_req_t req, void *arg)
2829 {
2830 	struct scsipi_adapter *adapt = chan->chan_adapter;
2831 
2832 	scsipi_adapter_lock(adapt);
2833 	SDT_PROBE3(scsi, base, adapter, request__start,  chan, req, arg);
2834 	(adapt->adapt_request)(chan, req, arg);
2835 	SDT_PROBE3(scsi, base, adapter, request__done,  chan, req, arg);
2836 	scsipi_adapter_unlock(adapt);
2837 }
2838 
2839 int
2840 scsipi_adapter_ioctl(struct scsipi_channel *chan, u_long cmd,
2841 	void *data, int flag, struct proc *p)
2842 {
2843 	struct scsipi_adapter *adapt = chan->chan_adapter;
2844 	int error;
2845 
2846 	if (adapt->adapt_ioctl == NULL)
2847 		return ENOTTY;
2848 
2849 	scsipi_adapter_lock(adapt);
2850 	error = (adapt->adapt_ioctl)(chan, cmd, data, flag, p);
2851 	scsipi_adapter_unlock(adapt);
2852 	return error;
2853 }
2854 
2855 int
2856 scsipi_adapter_enable(struct scsipi_adapter *adapt, int enable)
2857 {
2858 	int error;
2859 
2860 	scsipi_adapter_lock(adapt);
2861 	error = (adapt->adapt_enable)(adapt->adapt_dev, enable);
2862 	scsipi_adapter_unlock(adapt);
2863 	return error;
2864 }
2865 
2866 #ifdef SCSIPI_DEBUG
2867 /*
2868  * Given a scsipi_xfer, dump the request, in all its glory
2869  */
2870 void
2871 show_scsipi_xs(struct scsipi_xfer *xs)
2872 {
2873 
2874 	printf("xs(%p): ", xs);
2875 	printf("xs_control(0x%08x)", xs->xs_control);
2876 	printf("xs_status(0x%08x)", xs->xs_status);
2877 	printf("periph(%p)", xs->xs_periph);
2878 	printf("retr(0x%x)", xs->xs_retries);
2879 	printf("timo(0x%x)", xs->timeout);
2880 	printf("cmd(%p)", xs->cmd);
2881 	printf("len(0x%x)", xs->cmdlen);
2882 	printf("data(%p)", xs->data);
2883 	printf("len(0x%x)", xs->datalen);
2884 	printf("res(0x%x)", xs->resid);
2885 	printf("err(0x%x)", xs->error);
2886 	printf("bp(%p)", xs->bp);
2887 	show_scsipi_cmd(xs);
2888 }
2889 
2890 void
2891 show_scsipi_cmd(struct scsipi_xfer *xs)
2892 {
2893 	u_char *b = (u_char *) xs->cmd;
2894 	int i = 0;
2895 
2896 	scsipi_printaddr(xs->xs_periph);
2897 	printf(" command: ");
2898 
2899 	if ((xs->xs_control & XS_CTL_RESET) == 0) {
2900 		while (i < xs->cmdlen) {
2901 			if (i)
2902 				printf(",");
2903 			printf("0x%x", b[i++]);
2904 		}
2905 		printf("-[%d bytes]\n", xs->datalen);
2906 		if (xs->datalen)
2907 			show_mem(xs->data, uimin(64, xs->datalen));
2908 	} else
2909 		printf("-RESET-\n");
2910 }
2911 
2912 void
2913 show_mem(u_char *address, int num)
2914 {
2915 	int x;
2916 
2917 	printf("------------------------------");
2918 	for (x = 0; x < num; x++) {
2919 		if ((x % 16) == 0)
2920 			printf("\n%03d: ", x);
2921 		printf("%02x ", *address++);
2922 	}
2923 	printf("\n------------------------------\n");
2924 }
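/*
 * Example output (derived from the format strings above), for
 * show_mem(data, 20):
 *
 *	------------------------------
 *	000: 00 01 02 03 04 05 06 07 08 09 0a 0b 0c 0d 0e 0f
 *	016: 10 11 12 13
 *	------------------------------
 */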
2925 #endif /* SCSIPI_DEBUG */
2926