1 /*	$NetBSD: scsipi_base.c,v 1.190 2024/06/14 18:44:18 kardel Exp $	*/
2 
3 /*-
4  * Copyright (c) 1998, 1999, 2000, 2002, 2003, 2004 The NetBSD Foundation, Inc.
5  * All rights reserved.
6  *
7  * This code is derived from software contributed to The NetBSD Foundation
8  * by Charles M. Hannum; by Jason R. Thorpe of the Numerical Aerospace
9  * Simulation Facility, NASA Ames Research Center.
10  *
11  * Redistribution and use in source and binary forms, with or without
12  * modification, are permitted provided that the following conditions
13  * are met:
14  * 1. Redistributions of source code must retain the above copyright
15  *    notice, this list of conditions and the following disclaimer.
16  * 2. Redistributions in binary form must reproduce the above copyright
17  *    notice, this list of conditions and the following disclaimer in the
18  *    documentation and/or other materials provided with the distribution.
19  *
20  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
21  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
22  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
23  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
24  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
30  * POSSIBILITY OF SUCH DAMAGE.
31  */
32 
33 #include <sys/cdefs.h>
34 __KERNEL_RCSID(0, "$NetBSD: scsipi_base.c,v 1.190 2024/06/14 18:44:18 kardel Exp $");
35 
36 #ifdef _KERNEL_OPT
37 #include "opt_scsi.h"
38 #endif
39 
40 #include <sys/param.h>
41 #include <sys/systm.h>
42 #include <sys/kernel.h>
43 #include <sys/buf.h>
44 #include <sys/uio.h>
45 #include <sys/malloc.h>
46 #include <sys/pool.h>
47 #include <sys/errno.h>
48 #include <sys/device.h>
49 #include <sys/proc.h>
50 #include <sys/kthread.h>
51 #include <sys/hash.h>
52 #include <sys/atomic.h>
53 
54 #include <dev/scsipi/scsi_sdt.h>
55 #include <dev/scsipi/scsi_spc.h>
56 #include <dev/scsipi/scsipi_all.h>
57 #include <dev/scsipi/scsipi_disk.h>
58 #include <dev/scsipi/scsipiconf.h>
59 #include <dev/scsipi/scsipi_base.h>
60 
61 #include <dev/scsipi/scsi_all.h>
62 #include <dev/scsipi/scsi_message.h>
63 
64 #include <machine/param.h>
65 
66 SDT_PROVIDER_DEFINE(scsi);
67 
68 SDT_PROBE_DEFINE3(scsi, base, tag, get,
69     "struct scsipi_xfer *"/*xs*/, "uint8_t"/*tag*/, "uint8_t"/*type*/);
70 SDT_PROBE_DEFINE3(scsi, base, tag, put,
71     "struct scsipi_xfer *"/*xs*/, "uint8_t"/*tag*/, "uint8_t"/*type*/);
72 
73 SDT_PROBE_DEFINE3(scsi, base, adapter, request__start,
74     "struct scsipi_channel *"/*chan*/,
75     "scsipi_adapter_req_t"/*req*/,
76     "void *"/*arg*/);
77 SDT_PROBE_DEFINE3(scsi, base, adapter, request__done,
78     "struct scsipi_channel *"/*chan*/,
79     "scsipi_adapter_req_t"/*req*/,
80     "void *"/*arg*/);
81 
82 SDT_PROBE_DEFINE1(scsi, base, queue, batch__start,
83     "struct scsipi_channel *"/*chan*/);
84 SDT_PROBE_DEFINE2(scsi, base, queue, run,
85     "struct scsipi_channel *"/*chan*/,
86     "struct scsipi_xfer *"/*xs*/);
87 SDT_PROBE_DEFINE1(scsi, base, queue, batch__done,
88     "struct scsipi_channel *"/*chan*/);
89 
90 SDT_PROBE_DEFINE1(scsi, base, xfer, execute,  "struct scsipi_xfer *"/*xs*/);
91 SDT_PROBE_DEFINE1(scsi, base, xfer, enqueue,  "struct scsipi_xfer *"/*xs*/);
92 SDT_PROBE_DEFINE1(scsi, base, xfer, done,  "struct scsipi_xfer *"/*xs*/);
93 SDT_PROBE_DEFINE1(scsi, base, xfer, redone,  "struct scsipi_xfer *"/*xs*/);
94 SDT_PROBE_DEFINE1(scsi, base, xfer, complete,  "struct scsipi_xfer *"/*xs*/);
95 SDT_PROBE_DEFINE1(scsi, base, xfer, restart,  "struct scsipi_xfer *"/*xs*/);
96 SDT_PROBE_DEFINE1(scsi, base, xfer, free,  "struct scsipi_xfer *"/*xs*/);
97 
98 static int	scsipi_complete(struct scsipi_xfer *);
99 static void	scsipi_request_sense(struct scsipi_xfer *);
100 static int	scsipi_enqueue(struct scsipi_xfer *);
101 static void	scsipi_run_queue(struct scsipi_channel *chan);
102 
103 static void	scsipi_completion_thread(void *);
104 
105 static void	scsipi_get_tag(struct scsipi_xfer *);
106 static void	scsipi_put_tag(struct scsipi_xfer *);
107 
108 static int	scsipi_get_resource(struct scsipi_channel *);
109 static void	scsipi_put_resource(struct scsipi_channel *);
110 
111 static void	scsipi_async_event_max_openings(struct scsipi_channel *,
112 		    struct scsipi_max_openings *);
113 static void	scsipi_async_event_channel_reset(struct scsipi_channel *);
114 
115 static void	scsipi_channel_freeze_locked(struct scsipi_channel *, int);
116 
117 static void	scsipi_adapter_lock(struct scsipi_adapter *adapt);
118 static void	scsipi_adapter_unlock(struct scsipi_adapter *adapt);
119 
120 static void	scsipi_update_timeouts(struct scsipi_xfer *xs);
121 
122 static struct pool scsipi_xfer_pool;
123 
124 int scsipi_xs_count = 0;
125 
126 /*
127  * scsipi_init:
128  *
129  *	Called when a scsibus or atapibus is attached to the system
130  *	to initialize shared data structures.
131  */
132 void
133 scsipi_init(void)
134 {
135 	static int scsipi_init_done;
136 
137 	if (scsipi_init_done)
138 		return;
139 	scsipi_init_done = 1;
140 
141 	/* Initialize the scsipi_xfer pool. */
142 	pool_init(&scsipi_xfer_pool, sizeof(struct scsipi_xfer), 0,
143 	    0, 0, "scxspl", NULL, IPL_BIO);
144 	pool_prime(&scsipi_xfer_pool, 1);
145 
146 	scsipi_ioctl_init();
147 }
148 
149 /*
150  * scsipi_channel_init:
151  *
152  *	Initialize a scsipi_channel when it is attached.
153  */
154 int
155 scsipi_channel_init(struct scsipi_channel *chan)
156 {
157 	struct scsipi_adapter *adapt = chan->chan_adapter;
158 	int i;
159 
160 	/* Initialize shared data. */
161 	scsipi_init();
162 
163 	/* Initialize the queues. */
164 	TAILQ_INIT(&chan->chan_queue);
165 	TAILQ_INIT(&chan->chan_complete);
166 
167 	for (i = 0; i < SCSIPI_CHAN_PERIPH_BUCKETS; i++)
168 		LIST_INIT(&chan->chan_periphtab[i]);
169 
170 	/*
171 	 * Create the asynchronous completion thread.
172 	 */
173 	if (kthread_create(PRI_NONE, 0, NULL, scsipi_completion_thread, chan,
174 	    &chan->chan_thread, "%s", chan->chan_name)) {
175 		aprint_error_dev(adapt->adapt_dev, "unable to create completion"
176 		    " thread for channel %d\n", chan->chan_channel);
177 		panic("scsipi_channel_init");
178 	}
179 
180 	return 0;
181 }
182 
183 /*
184  * scsipi_channel_shutdown:
185  *
186  *	Shutdown a scsipi_channel.
187  */
188 void
189 scsipi_channel_shutdown(struct scsipi_channel *chan)
190 {
191 
192 	mutex_enter(chan_mtx(chan));
193 	/*
194 	 * Shut down the completion thread.
195 	 */
196 	chan->chan_tflags |= SCSIPI_CHANT_SHUTDOWN;
197 	cv_broadcast(chan_cv_complete(chan));
198 
199 	/*
200 	 * Now wait for the thread to exit.
201 	 */
202 	while (chan->chan_thread != NULL)
203 		cv_wait(chan_cv_thread(chan), chan_mtx(chan));
204 	mutex_exit(chan_mtx(chan));
205 }
206 
207 static uint32_t
208 scsipi_chan_periph_hash(uint64_t t, uint64_t l)
209 {
210 	uint32_t hash;
211 
212 	hash = hash32_buf(&t, sizeof(t), HASH32_BUF_INIT);
213 	hash = hash32_buf(&l, sizeof(l), hash);
214 
215 	return hash & SCSIPI_CHAN_PERIPH_HASHMASK;
216 }
217 
218 /*
219  * scsipi_insert_periph:
220  *
221  *	Insert a periph into the channel.
222  */
223 void
224 scsipi_insert_periph(struct scsipi_channel *chan, struct scsipi_periph *periph)
225 {
226 	uint32_t hash;
227 
228 	hash = scsipi_chan_periph_hash(periph->periph_target,
229 	    periph->periph_lun);
230 
231 	mutex_enter(chan_mtx(chan));
232 	LIST_INSERT_HEAD(&chan->chan_periphtab[hash], periph, periph_hash);
233 	mutex_exit(chan_mtx(chan));
234 }
235 
236 /*
237  * scsipi_remove_periph:
238  *
239  *	Remove a periph from the channel.
240  */
241 void
242 scsipi_remove_periph(struct scsipi_channel *chan,
243     struct scsipi_periph *periph)
244 {
245 
246 	LIST_REMOVE(periph, periph_hash);
247 }
248 
249 /*
250  * scsipi_lookup_periph_internal:
251  *
252  *	Lookup a periph on the specified channel; shared by the wrappers below.
253  */
254 static struct scsipi_periph *
255 scsipi_lookup_periph_internal(struct scsipi_channel *chan, int target, int lun, bool lock)
256 {
257 	struct scsipi_periph *periph;
258 	uint32_t hash;
259 
260 	if (target >= chan->chan_ntargets ||
261 	    lun >= chan->chan_nluns)
262 		return NULL;
263 
264 	hash = scsipi_chan_periph_hash(target, lun);
265 
266 	if (lock)
267 		mutex_enter(chan_mtx(chan));
268 	LIST_FOREACH(periph, &chan->chan_periphtab[hash], periph_hash) {
269 		if (periph->periph_target == target &&
270 		    periph->periph_lun == lun)
271 			break;
272 	}
273 	if (lock)
274 		mutex_exit(chan_mtx(chan));
275 
276 	return periph;
277 }
278 
279 struct scsipi_periph *
280 scsipi_lookup_periph_locked(struct scsipi_channel *chan, int target, int lun)
281 {
282 	return scsipi_lookup_periph_internal(chan, target, lun, false);
283 }
284 
285 struct scsipi_periph *
286 scsipi_lookup_periph(struct scsipi_channel *chan, int target, int lun)
287 {
288 	return scsipi_lookup_periph_internal(chan, target, lun, true);
289 }
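
/*
 * Example (illustrative only, kept compiled out): how a caller might map
 * a target/lun pair back to its periph.  "mychan" is a hypothetical
 * channel pointer, not something defined in this file.
 */
#if 0
	struct scsipi_periph *periph;

	/* Unlocked variant: takes and drops the channel mutex itself. */
	periph = scsipi_lookup_periph(mychan, /*target*/2, /*lun*/0);

	/* Locked variant: for callers already holding chan_mtx(mychan). */
	mutex_enter(chan_mtx(mychan));
	periph = scsipi_lookup_periph_locked(mychan, 2, 0);
	mutex_exit(chan_mtx(mychan));
#endif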
290 
291 /*
292  * scsipi_get_resource:
293  *
294  *	Allocate a single xfer `resource' from the channel.
295  *
296  *	NOTE: Must be called with channel lock held
297  */
298 static int
299 scsipi_get_resource(struct scsipi_channel *chan)
300 {
301 	struct scsipi_adapter *adapt = chan->chan_adapter;
302 
303 	if (chan->chan_flags & SCSIPI_CHAN_OPENINGS) {
304 		if (chan->chan_openings > 0) {
305 			chan->chan_openings--;
306 			return 1;
307 		}
308 		return 0;
309 	}
310 
311 	if (adapt->adapt_openings > 0) {
312 		adapt->adapt_openings--;
313 		return 1;
314 	}
315 	return 0;
316 }
317 
318 /*
319  * scsipi_grow_resources:
320  *
321  *	Attempt to grow resources for a channel.  If this succeeds,
322  *	we allocate one for our caller.
323  *
324  *	NOTE: Must be called with channel lock held
325  */
326 static inline int
327 scsipi_grow_resources(struct scsipi_channel *chan)
328 {
329 
330 	if (chan->chan_flags & SCSIPI_CHAN_CANGROW) {
331 		if ((chan->chan_flags & SCSIPI_CHAN_TACTIVE) == 0) {
332 			mutex_exit(chan_mtx(chan));
333 			scsipi_adapter_request(chan,
334 			    ADAPTER_REQ_GROW_RESOURCES, NULL);
335 			mutex_enter(chan_mtx(chan));
336 			return scsipi_get_resource(chan);
337 		}
338 		/*
339 		 * Ask the channel thread to do it; it will have to thaw
340 		 * the queue.
341 		 */
342 		scsipi_channel_freeze_locked(chan, 1);
343 		chan->chan_tflags |= SCSIPI_CHANT_GROWRES;
344 		cv_broadcast(chan_cv_complete(chan));
345 		return 0;
346 	}
347 
348 	return 0;
349 }
350 
351 /*
352  * scsipi_put_resource:
353  *
354  *	Free a single xfer `resource' to the channel.
355  *
356  *	NOTE: Must be called with channel lock held
357  */
358 static void
359 scsipi_put_resource(struct scsipi_channel *chan)
360 {
361 	struct scsipi_adapter *adapt = chan->chan_adapter;
362 
363 	if (chan->chan_flags & SCSIPI_CHAN_OPENINGS)
364 		chan->chan_openings++;
365 	else
366 		adapt->adapt_openings++;
367 }
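
/*
 * Sketch of the usual lifetime of an xfer resource (illustrative only,
 * compiled out): scsipi_run_queue() takes one opening with
 * scsipi_get_resource() before handing an xfer to the adapter, and
 * scsipi_done() returns it.  Both calls assume the channel lock is
 * held, per the NOTEs above.
 */
#if 0
	mutex_enter(chan_mtx(chan));
	if (scsipi_get_resource(chan) == 0) {
		/* No openings left; try to grow, else leave the xfer queued. */
		if (scsipi_grow_resources(chan) == 0) {
			mutex_exit(chan_mtx(chan));
			return;
		}
	}
	/* ... hand the xfer to the adapter; scsipi_done() will then ... */
	scsipi_put_resource(chan);
	mutex_exit(chan_mtx(chan));
#endif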
368 
369 /*
370  * scsipi_get_tag:
371  *
372  *	Get a tag ID for the specified xfer.
373  *
374  *	NOTE: Must be called with channel lock held
375  */
376 static void
377 scsipi_get_tag(struct scsipi_xfer *xs)
378 {
379 	struct scsipi_periph *periph = xs->xs_periph;
380 	int bit, tag;
381 	u_int word;
382 
383 	KASSERT(mutex_owned(chan_mtx(periph->periph_channel)));
384 
385 	bit = 0;	/* XXX gcc */
386 	for (word = 0; word < PERIPH_NTAGWORDS; word++) {
387 		bit = ffs(periph->periph_freetags[word]);
388 		if (bit != 0)
389 			break;
390 	}
391 #ifdef DIAGNOSTIC
392 	if (word == PERIPH_NTAGWORDS) {
393 		scsipi_printaddr(periph);
394 		printf("no free tags\n");
395 		panic("scsipi_get_tag");
396 	}
397 #endif
398 
399 	bit -= 1;
400 	periph->periph_freetags[word] &= ~(1U << bit);
401 	tag = (word << 5) | bit;
402 
403 	/* XXX Should eventually disallow this completely. */
404 	if (tag >= periph->periph_openings) {
405 		scsipi_printaddr(periph);
406 		printf("WARNING: tag %d greater than available openings %d\n",
407 		    tag, periph->periph_openings);
408 	}
409 
410 	xs->xs_tag_id = tag;
411 	SDT_PROBE3(scsi, base, tag, get,
412 	    xs, xs->xs_tag_id, xs->xs_tag_type);
413 }
414 
415 /*
416  * scsipi_put_tag:
417  *
418  *	Put the tag ID for the specified xfer back into the pool.
419  *
420  *	NOTE: Must be called with channel lock held
421  */
422 static void
423 scsipi_put_tag(struct scsipi_xfer *xs)
424 {
425 	struct scsipi_periph *periph = xs->xs_periph;
426 	int word, bit;
427 
428 	KASSERT(mutex_owned(chan_mtx(periph->periph_channel)));
429 
430 	SDT_PROBE3(scsi, base, tag, put,
431 	    xs, xs->xs_tag_id, xs->xs_tag_type);
432 
433 	word = xs->xs_tag_id >> 5;
434 	bit = xs->xs_tag_id & 0x1f;
435 
436 	periph->periph_freetags[word] |= (1U << bit);
437 }
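
/*
 * Worked example of the tag encoding used above: a tag ID packs a word
 * index and a bit index, 32 tags per word.  For xs_tag_id == 37:
 *
 *	word = 37 >> 5   = 1
 *	bit  = 37 & 0x1f = 5
 *
 * so scsipi_put_tag() sets bit 5 of periph_freetags[1], undoing the
 * matching scsipi_get_tag(), which cleared that bit and rebuilt the ID
 * as (1 << 5) | 5 == 37.
 */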
438 
439 /*
440  * scsipi_get_xs:
441  *
442  *	Allocate an xfer descriptor and associate it with the
443  *	specified peripheral.  If the peripheral has no more
444  *	available command openings, we either block waiting for
445  *	one to become available, or fail.
446  *
447  *	When this routine is called with the channel lock held
448  *	the flags must include XS_CTL_NOSLEEP.
449  */
450 struct scsipi_xfer *
451 scsipi_get_xs(struct scsipi_periph *periph, int flags)
452 {
453 	struct scsipi_xfer *xs;
454 	bool lock = (flags & XS_CTL_NOSLEEP) == 0;
455 
456 	SC_DEBUG(periph, SCSIPI_DB3, ("scsipi_get_xs\n"));
457 
458 	KASSERT(!cold);
459 
460 #ifdef DIAGNOSTIC
461 	/*
462 	 * URGENT commands can never be ASYNC.
463 	 */
464 	if ((flags & (XS_CTL_URGENT|XS_CTL_ASYNC)) ==
465 	    (XS_CTL_URGENT|XS_CTL_ASYNC)) {
466 		scsipi_printaddr(periph);
467 		printf("URGENT and ASYNC\n");
468 		panic("scsipi_get_xs");
469 	}
470 #endif
471 
472 	/*
473 	 * Wait for a command opening to become available.  Rules:
474 	 *
475 	 *	- All xfers must wait for an available opening.
476 	 *	  Exception: URGENT xfers can proceed when
477 	 *	  active == openings, because we use the opening
478 	 *	  of the command we're recovering for.
479 	 *	- if the periph has sense pending, only URGENT & REQSENSE
480 	 *	  xfers may proceed.
481 	 *
482 	 *	- If the periph is recovering, only URGENT xfers may
483 	 *	  proceed.
484 	 *
485 	 *	- If the periph is currently executing a recovery
486 	 *	  command, URGENT commands must block, because only
487 	 *	  one recovery command can execute at a time.
488 	 */
489 	if (lock)
490 		mutex_enter(chan_mtx(periph->periph_channel));
491 	for (;;) {
492 		if (flags & XS_CTL_URGENT) {
493 			if (periph->periph_active > periph->periph_openings)
494 				goto wait_for_opening;
495 			if (periph->periph_flags & PERIPH_SENSE) {
496 				if ((flags & XS_CTL_REQSENSE) == 0)
497 					goto wait_for_opening;
498 			} else {
499 				if ((periph->periph_flags &
500 				    PERIPH_RECOVERY_ACTIVE) != 0)
501 					goto wait_for_opening;
502 				periph->periph_flags |= PERIPH_RECOVERY_ACTIVE;
503 			}
504 			break;
505 		}
506 		if (periph->periph_active >= periph->periph_openings ||
507 		    (periph->periph_flags & PERIPH_RECOVERING) != 0)
508 			goto wait_for_opening;
509 		periph->periph_active++;
510 		KASSERT(mutex_owned(chan_mtx(periph->periph_channel)));
511 		break;
512 
513  wait_for_opening:
514 		if (flags & XS_CTL_NOSLEEP) {
515 			KASSERT(!lock);
516 			return NULL;
517 		}
518 		KASSERT(lock);
519 		SC_DEBUG(periph, SCSIPI_DB3, ("sleeping\n"));
520 		periph->periph_flags |= PERIPH_WAITING;
521 		cv_wait(periph_cv_periph(periph),
522 		    chan_mtx(periph->periph_channel));
523 	}
524 	if (lock)
525 		mutex_exit(chan_mtx(periph->periph_channel));
526 
527 	SC_DEBUG(periph, SCSIPI_DB3, ("calling pool_get\n"));
528 	xs = pool_get(&scsipi_xfer_pool,
529 	    ((flags & XS_CTL_NOSLEEP) != 0 ? PR_NOWAIT : PR_WAITOK));
530 	if (xs == NULL) {
531 		if (lock)
532 			mutex_enter(chan_mtx(periph->periph_channel));
533 		if (flags & XS_CTL_URGENT) {
534 			if ((flags & XS_CTL_REQSENSE) == 0)
535 				periph->periph_flags &= ~PERIPH_RECOVERY_ACTIVE;
536 		} else
537 			periph->periph_active--;
538 		if (lock)
539 			mutex_exit(chan_mtx(periph->periph_channel));
540 		scsipi_printaddr(periph);
541 		printf("unable to allocate %sscsipi_xfer\n",
542 		    (flags & XS_CTL_URGENT) ? "URGENT " : "");
543 	}
544 
545 	SC_DEBUG(periph, SCSIPI_DB3, ("returning\n"));
546 
547 	if (xs != NULL) {
548 		memset(xs, 0, sizeof(*xs));
549 		callout_init(&xs->xs_callout, 0);
550 		xs->xs_periph = periph;
551 		xs->xs_control = flags;
552 		xs->xs_status = 0;
553 		if ((flags & XS_CTL_NOSLEEP) == 0)
554 			mutex_enter(chan_mtx(periph->periph_channel));
555 		TAILQ_INSERT_TAIL(&periph->periph_xferq, xs, device_q);
556 		KASSERT(mutex_owned(chan_mtx(periph->periph_channel)));
557 		if ((flags & XS_CTL_NOSLEEP) == 0)
558 			mutex_exit(chan_mtx(periph->periph_channel));
559 	}
560 	return xs;
561 }
562 
563 /*
564  * scsipi_put_xs:
565  *
566  *	Release an xfer descriptor, decreasing the outstanding command
567  *	count for the peripheral.  If there is a thread waiting for
568  *	an opening, wake it up.  If not, kick any queued I/O the
569  *	peripheral may have.
570  *
571  *	NOTE: Must be called with channel lock held
572  */
573 void
574 scsipi_put_xs(struct scsipi_xfer *xs)
575 {
576 	struct scsipi_periph *periph = xs->xs_periph;
577 	int flags = xs->xs_control;
578 
579 	SDT_PROBE1(scsi, base, xfer, free,  xs);
580 	SC_DEBUG(periph, SCSIPI_DB3, ("scsipi_free_xs\n"));
581 	KASSERT(mutex_owned(chan_mtx(periph->periph_channel)));
582 
583 	TAILQ_REMOVE(&periph->periph_xferq, xs, device_q);
584 	callout_destroy(&xs->xs_callout);
585 	pool_put(&scsipi_xfer_pool, xs);
586 
587 #ifdef DIAGNOSTIC
588 	if ((periph->periph_flags & PERIPH_RECOVERY_ACTIVE) != 0 &&
589 	    periph->periph_active == 0) {
590 		scsipi_printaddr(periph);
591 		printf("recovery without a command to recover for\n");
592 		panic("scsipi_put_xs");
593 	}
594 #endif
595 
596 	if (flags & XS_CTL_URGENT) {
597 		if ((flags & XS_CTL_REQSENSE) == 0)
598 			periph->periph_flags &= ~PERIPH_RECOVERY_ACTIVE;
599 	} else
600 		periph->periph_active--;
601 	if (periph->periph_active == 0 &&
602 	    (periph->periph_flags & PERIPH_WAITDRAIN) != 0) {
603 		periph->periph_flags &= ~PERIPH_WAITDRAIN;
604 		cv_broadcast(periph_cv_active(periph));
605 	}
606 
607 	if (periph->periph_flags & PERIPH_WAITING) {
608 		periph->periph_flags &= ~PERIPH_WAITING;
609 		cv_broadcast(periph_cv_periph(periph));
610 	} else {
611 		if (periph->periph_switch->psw_start != NULL &&
612 		    device_is_active(periph->periph_dev)) {
613 			SC_DEBUG(periph, SCSIPI_DB2,
614 			    ("calling private start()\n"));
615 			(*periph->periph_switch->psw_start)(periph);
616 		}
617 	}
618 }
619 
620 /*
621  * scsipi_channel_freeze:
622  *
623  *	Freeze a channel's xfer queue.
624  */
625 void
626 scsipi_channel_freeze(struct scsipi_channel *chan, int count)
627 {
628 	bool lock = chan_running(chan) > 0;
629 
630 	if (lock)
631 		mutex_enter(chan_mtx(chan));
632 	chan->chan_qfreeze += count;
633 	if (lock)
634 		mutex_exit(chan_mtx(chan));
635 }
636 
637 static void
638 scsipi_channel_freeze_locked(struct scsipi_channel *chan, int count)
639 {
640 
641 	chan->chan_qfreeze += count;
642 }
643 
644 /*
645  * scsipi_channel_thaw:
646  *
647  *	Thaw a channel's xfer queue.
648  */
649 void
650 scsipi_channel_thaw(struct scsipi_channel *chan, int count)
651 {
652 	bool lock = chan_running(chan) > 0;
653 
654 	if (lock)
655 		mutex_enter(chan_mtx(chan));
656 	chan->chan_qfreeze -= count;
657 	/*
658 	 * Don't let the freeze count go negative.
659 	 *
660 	 * Presumably the adapter driver could keep track of this,
661 	 * but it might just be easier to do this here so as to allow
662 	 * multiple callers, including those outside the adapter driver.
663 	 */
664 	if (chan->chan_qfreeze < 0) {
665 		chan->chan_qfreeze = 0;
666 	}
667 	if (lock)
668 		mutex_exit(chan_mtx(chan));
669 
670 	/*
671 	 * Don't kick the queue until the channel is running.
672 	 */
673 	if (!lock)
674 		return;
675 
676 	/*
677 	 * Kick the channel's queue here.  Note, we may be running in
678 	 * interrupt context (softclock or HBA's interrupt), so the adapter
679 	 * driver had better not sleep.
680 	 */
681 	if (chan->chan_qfreeze == 0)
682 		scsipi_run_queue(chan);
683 }
684 
685 /*
686  * scsipi_channel_timed_thaw:
687  *
688  *	Thaw a channel after some time has expired. This will also
689  * 	run the channel's queue if the freeze count has reached 0.
690  */
691 void
692 scsipi_channel_timed_thaw(void *arg)
693 {
694 	struct scsipi_channel *chan = arg;
695 
696 	scsipi_channel_thaw(chan, 1);
697 }
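
/*
 * Typical use of the timed thaw (a sketch, compiled out; compare the
 * periph-level version in scsipi_periph_timed_thaw() below): freeze the
 * channel, then arrange for it to thaw after one second.  "sc->sc_callout"
 * is a hypothetical callout owned by the caller and set up elsewhere
 * with callout_init().
 */
#if 0
	scsipi_channel_freeze(chan, 1);
	callout_reset(&sc->sc_callout, hz, scsipi_channel_timed_thaw, chan);
#endif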
698 
699 /*
700  * scsipi_periph_freeze_locked:
701  *
702  *	Freeze a device's xfer queue.  Must be called with channel lock held.
703  */
704 void
705 scsipi_periph_freeze_locked(struct scsipi_periph *periph, int count)
706 {
707 
708 	periph->periph_qfreeze += count;
709 }
710 
711 /*
712  * scsipi_periph_thaw_locked:
713  *
714  *	Thaw a device's xfer queue.  Must be called with channel lock held.
715  */
716 void
717 scsipi_periph_thaw_locked(struct scsipi_periph *periph, int count)
718 {
719 
720 	periph->periph_qfreeze -= count;
721 #ifdef DIAGNOSTIC
722 	if (periph->periph_qfreeze < 0) {
723 		static const char pc[] = "periph freeze count < 0";
724 		scsipi_printaddr(periph);
725 		printf("%s\n", pc);
726 		panic(pc);
727 	}
728 #endif
729 	if (periph->periph_qfreeze == 0 &&
730 	    (periph->periph_flags & PERIPH_WAITING) != 0)
731 		cv_broadcast(periph_cv_periph(periph));
732 }
733 
734 void
735 scsipi_periph_freeze(struct scsipi_periph *periph, int count)
736 {
737 
738 	mutex_enter(chan_mtx(periph->periph_channel));
739 	scsipi_periph_freeze_locked(periph, count);
740 	mutex_exit(chan_mtx(periph->periph_channel));
741 }
742 
743 void
744 scsipi_periph_thaw(struct scsipi_periph *periph, int count)
745 {
746 
747 	mutex_enter(chan_mtx(periph->periph_channel));
748 	scsipi_periph_thaw_locked(periph, count);
749 	mutex_exit(chan_mtx(periph->periph_channel));
750 }
751 
752 /*
753  * scsipi_periph_timed_thaw:
754  *
755  *	Thaw a device after some time has expired.
756  */
757 void
758 scsipi_periph_timed_thaw(void *arg)
759 {
760 	struct scsipi_periph *periph = arg;
761 	struct scsipi_channel *chan = periph->periph_channel;
762 
763 	callout_stop(&periph->periph_callout);
764 
765 	mutex_enter(chan_mtx(chan));
766 	scsipi_periph_thaw_locked(periph, 1);
767 	if ((periph->periph_channel->chan_flags & SCSIPI_CHAN_TACTIVE) == 0) {
768 		/*
769 		 * Kick the channel's queue here.  Note, we're running in
770 		 * interrupt context (softclock), so the adapter driver
771 		 * had better not sleep.
772 		 */
773 		mutex_exit(chan_mtx(chan));
774 		scsipi_run_queue(periph->periph_channel);
775 	} else {
776 		/*
777 		 * Tell the completion thread to kick the channel's queue here.
778 		 */
779 		periph->periph_channel->chan_tflags |= SCSIPI_CHANT_KICK;
780 		cv_broadcast(chan_cv_complete(chan));
781 		mutex_exit(chan_mtx(chan));
782 	}
783 }
784 
785 /*
786  * scsipi_wait_drain:
787  *
788  *	Wait for a periph's pending xfers to drain.
789  */
790 void
791 scsipi_wait_drain(struct scsipi_periph *periph)
792 {
793 	struct scsipi_channel *chan = periph->periph_channel;
794 
795 	mutex_enter(chan_mtx(chan));
796 	while (periph->periph_active != 0) {
797 		periph->periph_flags |= PERIPH_WAITDRAIN;
798 		cv_wait(periph_cv_active(periph), chan_mtx(chan));
799 	}
800 	mutex_exit(chan_mtx(chan));
801 }
802 
803 /*
804  * scsipi_kill_pending:
805  *
806  *	Kill off all pending xfers for a periph.
807  *
808  *	NOTE: Must be called with channel lock held
809  */
810 void
811 scsipi_kill_pending(struct scsipi_periph *periph)
812 {
813 	struct scsipi_channel *chan = periph->periph_channel;
814 
815 	(*chan->chan_bustype->bustype_kill_pending)(periph);
816 	while (periph->periph_active != 0) {
817 		periph->periph_flags |= PERIPH_WAITDRAIN;
818 		cv_wait(periph_cv_active(periph), chan_mtx(chan));
819 	}
820 }
821 
822 /*
823  * scsipi_print_cdb:
824  * prints a command descriptor block (for debug purpose, error messages,
825  * SCSIVERBOSE, ...)
826  */
827 void
828 scsipi_print_cdb(struct scsipi_generic *cmd)
829 {
830 	int i, j;
831 
832 	printf("0x%02x", cmd->opcode);
833 
834 	switch (CDB_GROUPID(cmd->opcode)) {
835 	case CDB_GROUPID_0:
836 		j = CDB_GROUP0;
837 		break;
838 	case CDB_GROUPID_1:
839 		j = CDB_GROUP1;
840 		break;
841 	case CDB_GROUPID_2:
842 		j = CDB_GROUP2;
843 		break;
844 	case CDB_GROUPID_3:
845 		j = CDB_GROUP3;
846 		break;
847 	case CDB_GROUPID_4:
848 		j = CDB_GROUP4;
849 		break;
850 	case CDB_GROUPID_5:
851 		j = CDB_GROUP5;
852 		break;
853 	case CDB_GROUPID_6:
854 		j = CDB_GROUP6;
855 		break;
856 	case CDB_GROUPID_7:
857 		j = CDB_GROUP7;
858 		break;
859 	default:
860 		j = 0;
861 	}
862 	if (j == 0)
863 		j = sizeof(cmd->bytes);
864 	for (i = 0; i < j - 1; i++)	/* already done the opcode */
865 		printf(" %02x", cmd->bytes[i]);
866 }
867 
868 /*
869  * scsipi_interpret_sense:
870  *
871  *	Look at the returned sense and act on the error, determining
872  *	the unix error number to pass back.  (0 = report no error)
873  *
874  *	NOTE: If we return ERESTART, we are expected to have
875  *	thawed the device!
876  *
877  *	THIS IS THE DEFAULT ERROR HANDLER FOR SCSI DEVICES.
878  */
879 int
880 scsipi_interpret_sense(struct scsipi_xfer *xs)
881 {
882 	struct scsi_sense_data *sense;
883 	struct scsipi_periph *periph = xs->xs_periph;
884 	u_int8_t key;
885 	int error;
886 	u_int32_t info;
887 	static const char *error_mes[] = {
888 		"soft error (corrected)",
889 		"not ready", "medium error",
890 		"non-media hardware failure", "illegal request",
891 		"unit attention", "readonly device",
892 		"no data found", "vendor unique",
893 		"copy aborted", "command aborted",
894 		"search returned equal", "volume overflow",
895 		"verify miscompare", "unknown error key"
896 	};
897 
898 	sense = &xs->sense.scsi_sense;
899 #ifdef SCSIPI_DEBUG
900 	if (periph->periph_dbflags & SCSIPI_DB1) {
901 		int count, len;
902 		scsipi_printaddr(periph);
903 		printf(" sense debug information:\n");
904 		printf("\tcode 0x%x valid %d\n",
905 			SSD_RCODE(sense->response_code),
906 			sense->response_code & SSD_RCODE_VALID ? 1 : 0);
907 		printf("\tseg 0x%x key 0x%x ili 0x%x eom 0x%x fmark 0x%x\n",
908 			sense->segment,
909 			SSD_SENSE_KEY(sense->flags),
910 			sense->flags & SSD_ILI ? 1 : 0,
911 			sense->flags & SSD_EOM ? 1 : 0,
912 			sense->flags & SSD_FILEMARK ? 1 : 0);
913 		printf("\ninfo: 0x%x 0x%x 0x%x 0x%x followed by %d "
914 			"extra bytes\n",
915 			sense->info[0],
916 			sense->info[1],
917 			sense->info[2],
918 			sense->info[3],
919 			sense->extra_len);
920 		len = SSD_ADD_BYTES_LIM(sense);
921 		printf("\textra (up to %d bytes): ", len);
922 		for (count = 0; count < len; count++)
923 			printf("0x%x ", sense->csi[count]);
924 		printf("\n");
925 	}
926 #endif
927 
928 	/*
929 	 * If the periph has its own error handler, call it first.
930 	 * If it returns a legit error value, return that, otherwise
931 	 * it wants us to continue with normal error processing.
932 	 */
933 	if (periph->periph_switch->psw_error != NULL) {
934 		SC_DEBUG(periph, SCSIPI_DB2,
935 		    ("calling private err_handler()\n"));
936 		error = (*periph->periph_switch->psw_error)(xs);
937 		if (error != EJUSTRETURN)
938 			return error;
939 	}
940 	/* otherwise use the default */
941 	switch (SSD_RCODE(sense->response_code)) {
942 
943 		/*
944 		 * Old SCSI-1 and SASI devices respond with
945 		 * codes other than 70.
946 		 */
947 	case 0x00:		/* no error (command completed OK) */
948 		return 0;
949 	case 0x04:		/* drive not ready after it was selected */
950 		if ((periph->periph_flags & PERIPH_REMOVABLE) != 0)
951 			periph->periph_flags &= ~PERIPH_MEDIA_LOADED;
952 		if ((xs->xs_control & XS_CTL_IGNORE_NOT_READY) != 0)
953 			return 0;
954 		/* XXX - display some sort of error here? */
955 		return EIO;
956 	case 0x20:		/* invalid command */
957 		if ((xs->xs_control &
958 		     XS_CTL_IGNORE_ILLEGAL_REQUEST) != 0)
959 			return 0;
960 		return EINVAL;
961 	case 0x25:		/* invalid LUN (Adaptec ACB-4000) */
962 		return EACCES;
963 
964 		/*
965 		 * If it's code 70, use the extended stuff and
966 		 * interpret the key
967 		 */
968 	case 0x71:		/* delayed error */
969 		scsipi_printaddr(periph);
970 		key = SSD_SENSE_KEY(sense->flags);
971 		printf(" DEFERRED ERROR, key = 0x%x\n", key);
972 		/* FALLTHROUGH */
973 	case 0x70:
974 		if ((sense->response_code & SSD_RCODE_VALID) != 0)
975 			info = _4btol(sense->info);
976 		else
977 			info = 0;
978 		key = SSD_SENSE_KEY(sense->flags);
979 
980 		switch (key) {
981 		case SKEY_NO_SENSE:
982 		case SKEY_RECOVERED_ERROR:
983 			if (xs->resid == xs->datalen && xs->datalen) {
984 				/*
985 				 * Why is this here?
986 				 */
987 				xs->resid = 0;	/* not short read */
988 			}
989 			error = 0;
990 			break;
991 		case SKEY_EQUAL:
992 			error = 0;
993 			break;
994 		case SKEY_NOT_READY:
995 			if ((periph->periph_flags & PERIPH_REMOVABLE) != 0)
996 				periph->periph_flags &= ~PERIPH_MEDIA_LOADED;
997 			if ((xs->xs_control & XS_CTL_IGNORE_NOT_READY) != 0)
998 				return 0;
999 			if (sense->asc == 0x3A) {
1000 				error = ENODEV; /* Medium not present */
1001 				if (xs->xs_control & XS_CTL_SILENT_NODEV)
1002 					return error;
1003 			} else
1004 				error = EIO;
1005 			if ((xs->xs_control & XS_CTL_SILENT) != 0)
1006 				return error;
1007 			break;
1008 		case SKEY_ILLEGAL_REQUEST:
1009 			if ((xs->xs_control &
1010 			     XS_CTL_IGNORE_ILLEGAL_REQUEST) != 0)
1011 				return 0;
1012 			/*
1013 			 * Handle the case where a device reports
1014 			 * Logical Unit Not Supported during discovery.
1015 			 */
1016 			if ((xs->xs_control & XS_CTL_DISCOVERY) != 0 &&
1017 			    sense->asc == 0x25 &&
1018 			    sense->ascq == 0x00)
1019 				return EINVAL;
1020 			if ((xs->xs_control & XS_CTL_SILENT) != 0)
1021 				return EIO;
1022 			error = EINVAL;
1023 			break;
1024 		case SKEY_UNIT_ATTENTION:
1025 			if (sense->asc == 0x29 &&
1026 			    sense->ascq == 0x00) {
1027 				/* device or bus reset */
1028 				return ERESTART;
1029 			}
1030 			if ((periph->periph_flags & PERIPH_REMOVABLE) != 0)
1031 				periph->periph_flags &= ~PERIPH_MEDIA_LOADED;
1032 			if ((xs->xs_control &
1033 			     XS_CTL_IGNORE_MEDIA_CHANGE) != 0 ||
1034 				/* XXX Should reupload any transient state. */
1035 				(periph->periph_flags &
1036 				 PERIPH_REMOVABLE) == 0) {
1037 				return ERESTART;
1038 			}
1039 			if ((xs->xs_control & XS_CTL_SILENT) != 0)
1040 				return EIO;
1041 			error = EIO;
1042 			break;
1043 		case SKEY_DATA_PROTECT:
1044 			error = EROFS;
1045 			break;
1046 		case SKEY_BLANK_CHECK:
1047 			error = 0;
1048 			break;
1049 		case SKEY_ABORTED_COMMAND:
1050 			if (xs->xs_retries != 0) {
1051 				xs->xs_retries--;
1052 				error = ERESTART;
1053 			} else
1054 				error = EIO;
1055 			break;
1056 		case SKEY_VOLUME_OVERFLOW:
1057 			error = ENOSPC;
1058 			break;
1059 		default:
1060 			error = EIO;
1061 			break;
1062 		}
1063 
1064 		/* Print verbose decode if appropriate and possible */
1065 		if ((key == 0) ||
1066 		    ((xs->xs_control & XS_CTL_SILENT) != 0) ||
1067 		    (scsipi_print_sense(xs, 0) != 0))
1068 			return error;
1069 
1070 		/* Print brief(er) sense information */
1071 		scsipi_printaddr(periph);
1072 		printf("%s", error_mes[key - 1]);
1073 		if ((sense->response_code & SSD_RCODE_VALID) != 0) {
1074 			switch (key) {
1075 			case SKEY_NOT_READY:
1076 			case SKEY_ILLEGAL_REQUEST:
1077 			case SKEY_UNIT_ATTENTION:
1078 			case SKEY_DATA_PROTECT:
1079 				break;
1080 			case SKEY_BLANK_CHECK:
1081 				printf(", requested size: %d (decimal)",
1082 				    info);
1083 				break;
1084 			case SKEY_ABORTED_COMMAND:
1085 				if (xs->xs_retries)
1086 					printf(", retrying");
1087 				printf(", cmd 0x%x, info 0x%x",
1088 				    xs->cmd->opcode, info);
1089 				break;
1090 			default:
1091 				printf(", info = %d (decimal)", info);
1092 			}
1093 		}
1094 		if (sense->extra_len != 0) {
1095 			int n;
1096 			printf(", data =");
1097 			for (n = 0; n < sense->extra_len; n++)
1098 				printf(" %02x",
1099 				    sense->csi[n]);
1100 		}
1101 		printf("\n");
1102 		return error;
1103 
1104 	/*
1105 	 * Some other code, just report it
1106 	 */
1107 	default:
1108 #if defined(SCSIDEBUG) || defined(DEBUG)
1109 	{
1110 		static const char *uc = "undecodable sense error";
1111 		int i;
1112 		u_int8_t *cptr = (u_int8_t *) sense;
1113 		scsipi_printaddr(periph);
1114 		if (xs->cmd == &xs->cmdstore) {
1115 			printf("%s for opcode 0x%x, data=",
1116 			    uc, xs->cmdstore.opcode);
1117 		} else {
1118 			printf("%s, data=", uc);
1119 		}
1120 		for (i = 0; i < sizeof(*sense); i++)
1121 			printf(" 0x%02x", *(cptr++) & 0xff);
1122 		printf("\n");
1123 	}
1124 #else
1125 		scsipi_printaddr(periph);
1126 		printf("Sense Error Code 0x%x",
1127 			SSD_RCODE(sense->response_code));
1128 		if ((sense->response_code & SSD_RCODE_VALID) != 0) {
1129 			struct scsi_sense_data_unextended *usense =
1130 			    (struct scsi_sense_data_unextended *)sense;
1131 			printf(" at block no. %d (decimal)",
1132 			    _3btol(usense->block));
1133 		}
1134 		printf("\n");
1135 #endif
1136 		return EIO;
1137 	}
1138 }
1139 
1140 /*
1141  * scsipi_test_unit_ready:
1142  *
1143  *	Issue a `test unit ready' request.
1144  */
1145 int
1146 scsipi_test_unit_ready(struct scsipi_periph *periph, int flags)
1147 {
1148 	struct scsi_test_unit_ready cmd;
1149 	int retries;
1150 
1151 	/* some ATAPI drives don't support TEST UNIT READY. Sigh */
1152 	if (periph->periph_quirks & PQUIRK_NOTUR)
1153 		return 0;
1154 
1155 	if (flags & XS_CTL_DISCOVERY)
1156 		retries = 0;
1157 	else
1158 		retries = SCSIPIRETRIES;
1159 
1160 	memset(&cmd, 0, sizeof(cmd));
1161 	cmd.opcode = SCSI_TEST_UNIT_READY;
1162 
1163 	return scsipi_command(periph, (void *)&cmd, sizeof(cmd), 0, 0,
1164 	    retries, 10000, NULL, flags);
1165 }
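
/*
 * Callers typically poll for media readiness with this, e.g. (a sketch,
 * with flags a disk driver might pass):
 *
 *	error = scsipi_test_unit_ready(periph,
 *	    XS_CTL_IGNORE_ILLEGAL_REQUEST | XS_CTL_IGNORE_NOT_READY);
 */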
1166 
1167 static const struct scsipi_inquiry3_pattern {
1168 	const char vendor[8];
1169 	const char product[16];
1170 	const char revision[4];
1171 } scsipi_inquiry3_quirk[] = {
1172 	{ "ES-6600 ", "", "" },
1173 };
1174 
1175 static int
1176 scsipi_inquiry3_ok(const struct scsipi_inquiry_data *ib)
1177 {
1178 	for (size_t i = 0; i < __arraycount(scsipi_inquiry3_quirk); i++) {
1179 		const struct scsipi_inquiry3_pattern *q =
1180 		    &scsipi_inquiry3_quirk[i];
1181 #define MATCH(field) \
1182     (q->field[0] ? memcmp(ib->field, q->field, sizeof(ib->field)) == 0 : 1)
1183 		if (MATCH(vendor) && MATCH(product) && MATCH(revision))
1184 			return 0;
1185 	}
1186 	return 1;
1187 }
1188 
1189 /*
1190  * scsipi_inquire:
1191  *
1192  *	Ask the device about itself.
1193  */
1194 int
1195 scsipi_inquire(struct scsipi_periph *periph, struct scsipi_inquiry_data *inqbuf,
1196     int flags)
1197 {
1198 	struct scsipi_inquiry cmd;
1199 	int error;
1200 	int retries;
1201 
1202 	if (flags & XS_CTL_DISCOVERY)
1203 		retries = 0;
1204 	else
1205 		retries = SCSIPIRETRIES;
1206 
1207 	/*
1208 	 * If we request more data than the device can provide, it SHOULD just
1209 	 * return a short response.  However, some devices error with an
1210 	 * ILLEGAL REQUEST sense code, and yet others have even more special
1211 	 * failure modes (such as the GL641USB flash adapter, which goes loony
1212 	 * and sends corrupted CRCs).  To work around this, and to bring our
1213 	 * behavior more in line with other OSes, we do a shorter inquiry,
1214 	 * covering all the SCSI-2 information, first, and then request more
1215 	 * data iff the "additional length" field indicates there is more.
1216 	 * - mycroft, 2003/10/16
1217 	 */
1218 	memset(&cmd, 0, sizeof(cmd));
1219 	cmd.opcode = INQUIRY;
1220 	cmd.length = SCSIPI_INQUIRY_LENGTH_SCSI2;
1221 	error = scsipi_command(periph, (void *)&cmd, sizeof(cmd),
1222 	    (void *)inqbuf, SCSIPI_INQUIRY_LENGTH_SCSI2, retries,
1223 	    10000, NULL, flags | XS_CTL_DATA_IN);
1224 	if (!error &&
1225 	    inqbuf->additional_length > SCSIPI_INQUIRY_LENGTH_SCSI2 - 4) {
1226 		if (scsipi_inquiry3_ok(inqbuf)) {
1227 #if 0
1228 			printf("inquire: addlen=%d, retrying\n", inqbuf->additional_length);
1229 #endif
1230 			cmd.length = SCSIPI_INQUIRY_LENGTH_SCSI3;
1231 			error = scsipi_command(periph, (void *)&cmd, sizeof(cmd),
1232 			    (void *)inqbuf, SCSIPI_INQUIRY_LENGTH_SCSI3, retries,
1233 			    10000, NULL, flags | XS_CTL_DATA_IN);
1234 #if 0
1235 			printf("inquire: error=%d\n", error);
1236 #endif
1237 		}
1238 	}
1239 
1240 #ifdef SCSI_OLD_NOINQUIRY
1241 	/*
1242 	 * Kludge for the Adaptec ACB-4000 SCSI->MFM translator.
1243 	 * This board doesn't support the INQUIRY command at all.
1244 	 */
1245 	if (error == EINVAL || error == EACCES) {
1246 		/*
1247 		 * Conjure up an INQUIRY response.
1248 		 */
1249 		inqbuf->device = (error == EINVAL ?
1250 			 SID_QUAL_LU_PRESENT :
1251 			 SID_QUAL_LU_NOTPRESENT) | T_DIRECT;
1252 		inqbuf->dev_qual2 = 0;
1253 		inqbuf->version = 0;
1254 		inqbuf->response_format = SID_FORMAT_SCSI1;
1255 		inqbuf->additional_length = SCSIPI_INQUIRY_LENGTH_SCSI2 - 4;
1256 		inqbuf->flags1 = inqbuf->flags2 = inqbuf->flags3 = 0;
1257 		memcpy(inqbuf->vendor, "ADAPTEC ACB-4000            ", 28);
1258 		error = 0;
1259 	}
1260 
1261 	/*
1262 	 * Kludge for the Emulex MT-02 SCSI->QIC translator.
1263 	 * This board gives an empty response to an INQUIRY command.
1264 	 */
1265 	else if (error == 0 &&
1266 	    inqbuf->device == (SID_QUAL_LU_PRESENT | T_DIRECT) &&
1267 	    inqbuf->dev_qual2 == 0 &&
1268 	    inqbuf->version == 0 &&
1269 	    inqbuf->response_format == SID_FORMAT_SCSI1) {
1270 		/*
1271 		 * Fill out the INQUIRY response.
1272 		 */
1273 		inqbuf->device = (SID_QUAL_LU_PRESENT | T_SEQUENTIAL);
1274 		inqbuf->dev_qual2 = SID_REMOVABLE;
1275 		inqbuf->additional_length = SCSIPI_INQUIRY_LENGTH_SCSI2 - 4;
1276 		inqbuf->flags1 = inqbuf->flags2 = inqbuf->flags3 = 0;
1277 		memcpy(inqbuf->vendor, "EMULEX  MT-02 QIC           ", 28);
1278 	}
1279 #endif /* SCSI_OLD_NOINQUIRY */
1280 
1281 	return error;
1282 }
1283 
1284 /*
1285  * scsipi_prevent:
1286  *
1287  *	Prevent or allow the user to remove the media
1288  */
1289 int
1290 scsipi_prevent(struct scsipi_periph *periph, int type, int flags)
1291 {
1292 	struct scsi_prevent_allow_medium_removal cmd;
1293 
1294 	if (periph->periph_quirks & PQUIRK_NODOORLOCK)
1295 		return 0;
1296 
1297 	memset(&cmd, 0, sizeof(cmd));
1298 	cmd.opcode = SCSI_PREVENT_ALLOW_MEDIUM_REMOVAL;
1299 	cmd.how = type;
1300 
1301 	return (scsipi_command(periph, (void *)&cmd, sizeof(cmd), 0, 0,
1302 	    SCSIPIRETRIES, 5000, NULL, flags));
1303 }
1304 
1305 /*
1306  * scsipi_start:
1307  *
1308  *	Send a START UNIT.
1309  */
1310 int
1311 scsipi_start(struct scsipi_periph *periph, int type, int flags)
1312 {
1313 	struct scsipi_start_stop cmd;
1314 
1315 	memset(&cmd, 0, sizeof(cmd));
1316 	cmd.opcode = START_STOP;
1317 	cmd.byte2 = 0x00;
1318 	cmd.how = type;
1319 
1320 	return scsipi_command(periph, (void *)&cmd, sizeof(cmd), 0, 0,
1321 	    SCSIPIRETRIES, (type & SSS_START) ? 60000 : 10000, NULL, flags);
1322 }
1323 
1324 /*
1325  * scsipi_mode_sense, scsipi_mode_sense_big:
1326  *	get a sense page from a device
1327  */
1328 
1329 int
1330 scsipi_mode_sense(struct scsipi_periph *periph, int byte2, int page,
1331     struct scsi_mode_parameter_header_6 *data, int len, int flags, int retries,
1332     int timeout)
1333 {
1334 	struct scsi_mode_sense_6 cmd;
1335 
1336 	memset(&cmd, 0, sizeof(cmd));
1337 	cmd.opcode = SCSI_MODE_SENSE_6;
1338 	cmd.byte2 = byte2;
1339 	cmd.page = page;
1340 	cmd.length = len & 0xff;
1341 
1342 	return scsipi_command(periph, (void *)&cmd, sizeof(cmd),
1343 	    (void *)data, len, retries, timeout, NULL, flags | XS_CTL_DATA_IN);
1344 }
1345 
1346 int
1347 scsipi_mode_sense_big(struct scsipi_periph *periph, int byte2, int page,
1348     struct scsi_mode_parameter_header_10 *data, int len, int flags, int retries,
1349     int timeout)
1350 {
1351 	struct scsi_mode_sense_10 cmd;
1352 
1353 	memset(&cmd, 0, sizeof(cmd));
1354 	cmd.opcode = SCSI_MODE_SENSE_10;
1355 	cmd.byte2 = byte2;
1356 	cmd.page = page;
1357 	_lto2b(len, cmd.length);
1358 
1359 	return scsipi_command(periph, (void *)&cmd, sizeof(cmd),
1360 	    (void *)data, len, retries, timeout, NULL, flags | XS_CTL_DATA_IN);
1361 }
1362 
1363 int
1364 scsipi_mode_select(struct scsipi_periph *periph, int byte2,
1365     struct scsi_mode_parameter_header_6 *data, int len, int flags, int retries,
1366     int timeout)
1367 {
1368 	struct scsi_mode_select_6 cmd;
1369 
1370 	memset(&cmd, 0, sizeof(cmd));
1371 	cmd.opcode = SCSI_MODE_SELECT_6;
1372 	cmd.byte2 = byte2;
1373 	cmd.length = len & 0xff;
1374 
1375 	return scsipi_command(periph, (void *)&cmd, sizeof(cmd),
1376 	    (void *)data, len, retries, timeout, NULL, flags | XS_CTL_DATA_OUT);
1377 }
1378 
1379 int
1380 scsipi_mode_select_big(struct scsipi_periph *periph, int byte2,
1381     struct scsi_mode_parameter_header_10 *data, int len, int flags, int retries,
1382     int timeout)
1383 {
1384 	struct scsi_mode_select_10 cmd;
1385 
1386 	memset(&cmd, 0, sizeof(cmd));
1387 	cmd.opcode = SCSI_MODE_SELECT_10;
1388 	cmd.byte2 = byte2;
1389 	_lto2b(len, cmd.length);
1390 
1391 	return scsipi_command(periph, (void *)&cmd, sizeof(cmd),
1392 	    (void *)data, len, retries, timeout, NULL, flags | XS_CTL_DATA_OUT);
1393 }
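
/*
 * A sketch (compiled out) of fetching a mode page with the 6-byte
 * variant above.  The caller supplies a buffer that starts with the
 * header; "struct mypage" stands in for a real page layout and is
 * hypothetical.
 */
#if 0
	struct {
		struct scsi_mode_parameter_header_6 header;
		struct mypage page;
	} data;

	error = scsipi_mode_sense(periph, 0, /*page*/0x08, &data.header,
	    sizeof(data), 0, SCSIPIRETRIES, 10000);
#endif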
1394 
1395 /*
1396  * scsipi_get_opcodeinfo:
1397  *
1398  *	Query the device for supported commands and their timeouts,
1399  *	building a timeout lookup table if timeout information is available.
1400  */
1401 void
1402 scsipi_get_opcodeinfo(struct scsipi_periph *periph)
1403 {
1404 	u_int8_t *data;
1405 	int len = 16*1024;
1406 	int rc;
1407 	int retries;
1408 	struct scsi_repsuppopcode cmd;
1409 
1410 	/* refrain from asking for supported opcodes */
1411 	if (periph->periph_quirks & PQUIRK_NOREPSUPPOPC ||
1412 	    periph->periph_type == T_PROCESSOR || /* spec. */
1413 	    periph->periph_type == T_CDROM) /* spec. */
1414 		return;
1415 
1416 	scsipi_free_opcodeinfo(periph);
1417 
1418 	/*
1419 	 * query REPORT SUPPORTED OPERATION CODES
1420 	 * if OK
1421 	 *   enumerate all codes
1422 	 *     if timeout exists insert maximum into opcode table
1423 	 */
1424 	data = malloc(len, M_DEVBUF, M_WAITOK|M_ZERO);
1425 
1426 	memset(&cmd, 0, sizeof(cmd));
1427 	cmd.opcode = SCSI_MAINTENANCE_IN;
1428 	cmd.svcaction = RSOC_REPORT_SUPPORTED_OPCODES;
1429 	cmd.repoption = RSOC_RCTD|RSOC_ALL;
1430 	_lto4b(len, cmd.alloclen);
1431 
1432 	/* loop to skip any UNIT ATTENTIONS at this point */
1433 	retries = 3;
1434 	do {
1435 		rc = scsipi_command(periph, (void *)&cmd, sizeof(cmd),
1436 				    (void *)data, len, 0, 60000, NULL,
1437 				    XS_CTL_DATA_IN|XS_CTL_SILENT);
1438 #ifdef SCSIPI_DEBUG
1439 		if (rc != 0) {
1440 			SC_DEBUG(periph, SCSIPI_DB3,
1441 				("SCSI_MAINTENANCE_IN"
1442 			 	"[RSOC_REPORT_SUPPORTED_OPCODES] command"
1443 				" failed: rc=%d, retries=%d\n",
1444 				rc, retries));
1445 		}
1446 #endif
1447 	} while (rc == EIO && retries-- > 0);
1448 
1449 	if (rc == 0) {
1450 		int count;
1451 		int dlen = _4btol(data);
1452 		u_int8_t *c = data + 4;
1453 
1454 		SC_DEBUG(periph, SCSIPI_DB3,
1455 			 ("supported opcode timeout-values loaded\n"));
1456 		SC_DEBUG(periph, SCSIPI_DB3,
1457 			 ("CMD  LEN  SA    spec  nom. time  cmd timeout\n"));
1458 
1459 		struct scsipi_opcodes *tot = malloc(sizeof(struct scsipi_opcodes),
1460 		    M_DEVBUF, M_WAITOK|M_ZERO);
1461 
1462 		count = 0;
1463 		while (tot != NULL &&
1464 		       dlen >= (int)sizeof(struct scsi_repsupopcode_all_commands_descriptor)) {
1465 			struct scsi_repsupopcode_all_commands_descriptor *acd
1466 				= (struct scsi_repsupopcode_all_commands_descriptor *)c;
1467 #ifdef SCSIPI_DEBUG
1468 			int cdblen = _2btol((const u_int8_t *)&acd->cdblen);
1469 #endif
1470 			dlen -= sizeof(struct scsi_repsupopcode_all_commands_descriptor);
1471 			c += sizeof(struct scsi_repsupopcode_all_commands_descriptor);
1472 			SC_DEBUG(periph, SCSIPI_DB3,
1473 				 ("0x%02x(%2d) ", acd->opcode, cdblen));
1474 
1475 			tot->opcode_info[acd->opcode].ti_flags = SCSIPI_TI_VALID;
1476 
1477 			if (acd->flags & RSOC_ACD_SERVACTV) {
1478 				SC_DEBUGN(periph, SCSIPI_DB3,
1479 					 ("0x%02x%02x ",
1480 					  acd->serviceaction[0],
1481 					  acd->serviceaction[1]));
1482 			} else {
1483 				SC_DEBUGN(periph, SCSIPI_DB3, ("       "));
1484 			}
1485 
1486 			if (acd->flags & RSOC_ACD_CTDP
1487 			    && dlen >= (int)sizeof(struct scsi_repsupopcode_timeouts_descriptor)) {
1488 				struct scsi_repsupopcode_timeouts_descriptor *td
1489 					= (struct scsi_repsupopcode_timeouts_descriptor *)c;
1490 				long nomto = _4btol(td->nom_process_timeout);
1491 				long cmdto = _4btol(td->cmd_process_timeout);
1492 				long t = (cmdto > nomto) ? cmdto : nomto;
1493 
1494 				dlen -= sizeof(struct scsi_repsupopcode_timeouts_descriptor);
1495 				c += sizeof(struct scsi_repsupopcode_timeouts_descriptor);
1496 
1497 				SC_DEBUGN(periph, SCSIPI_DB3,
1498 					  ("0x%02x %10ld %10ld",
1499 					   td->cmd_specific,
1500 					   nomto, cmdto));
1501 
1502 				if (t > tot->opcode_info[acd->opcode].ti_timeout) {
1503 					tot->opcode_info[acd->opcode].ti_timeout = t;
1504 					++count;
1505 				}
1506 			}
1507 			SC_DEBUGN(periph, SCSIPI_DB3, ("\n"));
1508 		}
1509 
1510 		if (count > 0) {
1511 			periph->periph_opcs = tot;
1512 		} else {
1513 			free(tot, M_DEVBUF);
1514 			SC_DEBUG(periph, SCSIPI_DB3,
1515 			 	("no usable timeout values available\n"));
1516 		}
1517 	} else {
1518 		SC_DEBUG(periph, SCSIPI_DB3,
1519 			 ("SCSI_MAINTENANCE_IN"
1520 			  "[RSOC_REPORT_SUPPORTED_OPCODES] failed error=%d"
1521 			  " - no device provided timeout "
1522 			  "values available\n", rc));
1523 	}
1524 
1525 	free(data, M_DEVBUF);
1526 }
1527 
1528 /*
1529  * scsipi_update_timeouts:
1530  *	Override the timeout value if device- or config-provided
1531  *	timeouts are available.
1532  */
1533 static void
1534 scsipi_update_timeouts(struct scsipi_xfer *xs)
1535 {
1536 	struct scsipi_opcodes *opcs;
1537 	u_int8_t cmd;
1538 	int timeout;
1539 	struct scsipi_opinfo *oi;
1540 
1541 	if (xs->timeout <= 0) {
1542 		return;
1543 	}
1544 
1545 	opcs = xs->xs_periph->periph_opcs;
1546 
1547 	if (opcs == NULL) {
1548 		return;
1549 	}
1550 
1551 	cmd = xs->cmd->opcode;
1552 	oi = &opcs->opcode_info[cmd];
1553 
1554 	timeout = 1000 * (int)oi->ti_timeout;
1555 
1557 	if (timeout > xs->timeout && timeout < 86400000) {
1558 		/*
1559 		 * pick up device configured timeouts if they
1560 		 * are longer than the requested ones but less
1561 		 * than a day
1562 		 */
1563 #ifdef SCSIPI_DEBUG
1564 		if ((oi->ti_flags & SCSIPI_TI_LOGGED) == 0) {
1565 			SC_DEBUG(xs->xs_periph, SCSIPI_DB3,
1566 				 ("Overriding command 0x%02x "
1567 				  "timeout of %d with %d ms\n",
1568 				  cmd, xs->timeout, timeout));
1569 			oi->ti_flags |= SCSIPI_TI_LOGGED;
1570 		}
1571 #endif
1572 		xs->timeout = timeout;
1573 	}
1574 }
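
/*
 * Example of the arithmetic above: a device reporting a 120 second
 * command timeout yields ti_timeout == 120, so timeout becomes
 * 120000 ms.  That overrides a smaller xs->timeout (say 10000 ms);
 * values of zero, values no larger than the request, and anything of
 * a day (86400000 ms) or more are ignored.
 */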
1575 
1576 /*
1577  * scsipi_free_opcodeinfo:
1578  *
1579  *	Free the opcode information table.
1580  */
1581 void
1582 scsipi_free_opcodeinfo(struct scsipi_periph *periph)
1583 {
1584 	if (periph->periph_opcs != NULL) {
1585 		free(periph->periph_opcs, M_DEVBUF);
1586 	}
1587 
1588 	periph->periph_opcs = NULL;
1589 }
1590 
1591 /*
1592  * scsipi_done:
1593  *
1594  *	This routine is called by an adapter's interrupt handler when
1595  *	an xfer is completed.
1596  */
1597 void
1598 scsipi_done(struct scsipi_xfer *xs)
1599 {
1600 	struct scsipi_periph *periph = xs->xs_periph;
1601 	struct scsipi_channel *chan = periph->periph_channel;
1602 	int freezecnt;
1603 
1604 	SC_DEBUG(periph, SCSIPI_DB2, ("scsipi_done\n"));
1605 #ifdef SCSIPI_DEBUG
1606 	if (periph->periph_dbflags & SCSIPI_DB1)
1607 		show_scsipi_cmd(xs);
1608 #endif
1609 
1610 	mutex_enter(chan_mtx(chan));
1611 	SDT_PROBE1(scsi, base, xfer, done,  xs);
1612 	/*
1613 	 * The resource this command was using is now free.
1614 	 */
1615 	if (xs->xs_status & XS_STS_DONE) {
1616 		/* XXX in certain circumstances, such as a device
1617 		 * being detached, an xs that has already been
1618 		 * scsipi_done()'d by the main thread will be done'd
1619 		 * again by scsibusdetach(). Putting the xs on the
1620 		 * chan_complete queue causes list corruption and
1621 		 * everyone dies. This prevents that, but perhaps
1622 		 * there should be better coordination somewhere such
1623 		 * that this won't ever happen (and can be turned into
1624 		 * a KASSERT()).
1625 		 */
1626 		SDT_PROBE1(scsi, base, xfer, redone,  xs);
1627 		mutex_exit(chan_mtx(chan));
1628 		goto out;
1629 	}
1630 	scsipi_put_resource(chan);
1631 	xs->xs_periph->periph_sent--;
1632 
1633 	/*
1634 	 * If the command was tagged, free the tag.
1635 	 */
1636 	if (XS_CTL_TAGTYPE(xs) != 0)
1637 		scsipi_put_tag(xs);
1638 	else
1639 		periph->periph_flags &= ~PERIPH_UNTAG;
1640 
1641 	/* Mark the command as `done'. */
1642 	xs->xs_status |= XS_STS_DONE;
1643 
1644 #ifdef DIAGNOSTIC
1645 	if ((xs->xs_control & (XS_CTL_ASYNC|XS_CTL_POLL)) ==
1646 	    (XS_CTL_ASYNC|XS_CTL_POLL))
1647 		panic("scsipi_done: ASYNC and POLL");
1648 #endif
1649 
1650 	/*
1651 	 * If the xfer had an error of any sort, freeze the
1652 	 * periph's queue.  Freeze it again if we were requested
1653 	 * to do so in the xfer.
1654 	 */
1655 	freezecnt = 0;
1656 	if (xs->error != XS_NOERROR)
1657 		freezecnt++;
1658 	if (xs->xs_control & XS_CTL_FREEZE_PERIPH)
1659 		freezecnt++;
1660 	if (freezecnt != 0)
1661 		scsipi_periph_freeze_locked(periph, freezecnt);
1662 
1663 	/*
1664 	 * record the xfer with a pending sense, in case a SCSI reset is
1665 	 * received before the thread is woken up.
1666 	 */
1667 	if (xs->error == XS_BUSY && xs->status == SCSI_CHECK) {
1668 		periph->periph_flags |= PERIPH_SENSE;
1669 		periph->periph_xscheck = xs;
1670 	}
1671 
1672 	/*
1673 	 * If this was an xfer that was not to complete asynchronously,
1674 	 * let the requesting thread perform error checking/handling
1675 	 * in its context.
1676 	 */
1677 	if ((xs->xs_control & XS_CTL_ASYNC) == 0) {
1678 		/*
1679 		 * If it's a polling job, just return, to unwind the
1680 		 * call graph.  We don't need to restart the queue,
1681 		 * because polling jobs are treated specially, and
1682 		 * are really only used during crash dumps anyway
1683 		 * (XXX or during boot-time autoconfiguration of
1684 		 * ATAPI devices).
1685 		 */
1686 		if (xs->xs_control & XS_CTL_POLL) {
1687 			mutex_exit(chan_mtx(chan));
1688 			return;
1689 		}
1690 		cv_broadcast(xs_cv(xs));
1691 		mutex_exit(chan_mtx(chan));
1692 		goto out;
1693 	}
1694 
1695 	/*
1696 	 * Catch the extremely common case of I/O completing
1697 	 * without error; no use in taking a context switch
1698 	 * if we can handle it in interrupt context.
1699 	 */
1700 	if (xs->error == XS_NOERROR) {
1701 		mutex_exit(chan_mtx(chan));
1702 		(void) scsipi_complete(xs);
1703 		goto out;
1704 	}
1705 
1706 	/*
1707 	 * There is an error on this xfer.  Put it on the channel's
1708 	 * completion queue, and wake up the completion thread.
1709 	 */
1710 	TAILQ_INSERT_TAIL(&chan->chan_complete, xs, channel_q);
1711 	cv_broadcast(chan_cv_complete(chan));
1712 	mutex_exit(chan_mtx(chan));
1713 
1714  out:
1715 	/*
1716 	 * If there are more xfers on the channel's queue, attempt to
1717 	 * run them.
1718 	 */
1719 	scsipi_run_queue(chan);
1720 }
1721 
1722 /*
1723  * scsipi_complete:
1724  *
1725  *	Completion of a scsipi_xfer.  This is the guts of scsipi_done().
1726  *
1727  *	NOTE: This routine MUST be called with valid thread context
1728  *	except for the case where the following two conditions are
1729  *	true:
1730  *
1731  *		xs->error == XS_NOERROR
1732  *		XS_CTL_ASYNC is set in xs->xs_control
1733  *
1734  *	The semantics of this routine can be tricky, so here is an
1735  *	explanation:
1736  *
1737  *		0		Xfer completed successfully.
1738  *
1739  *		ERESTART	Xfer had an error, but was restarted.
1740  *
1741  *		anything else	Xfer had an error, return value is Unix
1742  *				errno.
1743  *
1744  *	If the return value is anything but ERESTART:
1745  *
1746  *		- If XS_CTL_ASYNC is set, `xs' has been freed back to
1747  *		  the pool.
1748  *		- If there is a buf associated with the xfer,
1749  *		  it has been biodone()'d.
1750  */
1751 static int
1752 scsipi_complete(struct scsipi_xfer *xs)
1753 {
1754 	struct scsipi_periph *periph = xs->xs_periph;
1755 	struct scsipi_channel *chan = periph->periph_channel;
1756 	int error;
1757 
1758 	SDT_PROBE1(scsi, base, xfer, complete,  xs);
1759 
1760 #ifdef DIAGNOSTIC
1761 	if ((xs->xs_control & XS_CTL_ASYNC) != 0 && xs->bp == NULL)
1762 		panic("scsipi_complete: XS_CTL_ASYNC but no buf");
1763 #endif
1764 	/*
1765 	 * If command terminated with a CHECK CONDITION, we need to issue a
1766 	 * REQUEST_SENSE command. Once the REQUEST_SENSE has been processed
1767 	 * we'll have the real status.
1768 	 * Must be processed with channel lock held to avoid missing
1769 	 * a SCSI bus reset for this command.
1770 	 */
1771 	mutex_enter(chan_mtx(chan));
1772 	if (xs->error == XS_BUSY && xs->status == SCSI_CHECK) {
1773 		/* request sense for a request sense? */
1774 		if (xs->xs_control & XS_CTL_REQSENSE) {
1775 			scsipi_printaddr(periph);
1776 			printf("request sense for a request sense?\n");
1777 			/* XXX maybe we should reset the device? */
1778 			/* we've been frozen because xs->error != XS_NOERROR */
1779 			scsipi_periph_thaw_locked(periph, 1);
1780 			mutex_exit(chan_mtx(chan));
1781 			if (xs->resid < xs->datalen) {
1782 				printf("we read %d bytes of sense anyway:\n",
1783 				    xs->datalen - xs->resid);
1784 				scsipi_print_sense_data((void *)xs->data, 0);
1785 			}
1786 			return EINVAL;
1787 		}
1788 		mutex_exit(chan_mtx(chan));	/* XXX allows other commands to queue or run */
1789 		scsipi_request_sense(xs);
1790 	} else
1791 		mutex_exit(chan_mtx(chan));
1792 
1793 	/*
1794 	 * If it's a user level request, bypass all usual completion
1795 	 * processing; let the user work it out.
1796 	 */
1797 	if ((xs->xs_control & XS_CTL_USERCMD) != 0) {
1798 		SC_DEBUG(periph, SCSIPI_DB3, ("calling user done()\n"));
1799 		mutex_enter(chan_mtx(chan));
1800 		if (xs->error != XS_NOERROR)
1801 			scsipi_periph_thaw_locked(periph, 1);
1802 		mutex_exit(chan_mtx(chan));
1803 		scsipi_user_done(xs);
1804 		SC_DEBUG(periph, SCSIPI_DB3, ("returned from user done()\n"));
1805 		return 0;
1806 	}
1807 
1808 	switch (xs->error) {
1809 	case XS_NOERROR:
1810 		error = 0;
1811 		break;
1812 
1813 	case XS_SENSE:
1814 	case XS_SHORTSENSE:
1815 		error = (*chan->chan_bustype->bustype_interpret_sense)(xs);
1816 		break;
1817 
1818 	case XS_RESOURCE_SHORTAGE:
1819 		/*
1820 		 * XXX Should freeze channel's queue.
1821 		 */
1822 		scsipi_printaddr(periph);
1823 		printf("adapter resource shortage\n");
1824 		/* FALLTHROUGH */
1825 
1826 	case XS_BUSY:
1827 		if (xs->error == XS_BUSY && xs->status == SCSI_QUEUE_FULL) {
1828 			struct scsipi_max_openings mo;
1829 
1830 			/*
1831 			 * We set the openings to active - 1, assuming that
1832 			 * the command that got us here is the first one that
1833 			 * can't fit into the device's queue.  If that's not
1834 			 * the case, I guess we'll find out soon enough.
1835 			 */
1836 			mo.mo_target = periph->periph_target;
1837 			mo.mo_lun = periph->periph_lun;
1838 			if (periph->periph_active < periph->periph_openings)
1839 				mo.mo_openings = periph->periph_active - 1;
1840 			else
1841 				mo.mo_openings = periph->periph_openings - 1;
1842 #ifdef DIAGNOSTIC
1843 			if (mo.mo_openings < 0) {
1844 				scsipi_printaddr(periph);
1845 				printf("QUEUE FULL resulted in < 0 openings\n");
1846 				panic("scsipi_done");
1847 			}
1848 #endif
1849 			if (mo.mo_openings == 0) {
1850 				scsipi_printaddr(periph);
1851 				printf("QUEUE FULL resulted in 0 openings\n");
1852 				mo.mo_openings = 1;
1853 			}
1854 			scsipi_async_event(chan, ASYNC_EVENT_MAX_OPENINGS, &mo);
1855 			error = ERESTART;
1856 		} else if (xs->xs_retries != 0) {
1857 			xs->xs_retries--;
1858 			/*
1859 			 * Wait one second, and try again.
1860 			 */
1861 			mutex_enter(chan_mtx(chan));
1862 			if ((xs->xs_control & XS_CTL_POLL) ||
1863 			    (chan->chan_flags & SCSIPI_CHAN_TACTIVE) == 0) {
1864 				/* XXX: quite extreme */
1865 				kpause("xsbusy", false, hz, chan_mtx(chan));
1866 			} else if (!callout_pending(&periph->periph_callout)) {
1867 				scsipi_periph_freeze_locked(periph, 1);
1868 				callout_reset(&periph->periph_callout,
1869 				    hz, scsipi_periph_timed_thaw, periph);
1870 			}
1871 			mutex_exit(chan_mtx(chan));
1872 			error = ERESTART;
1873 		} else
1874 			error = EBUSY;
1875 		break;
1876 
1877 	case XS_REQUEUE:
1878 		error = ERESTART;
1879 		break;
1880 
1881 	case XS_SELTIMEOUT:
1882 	case XS_TIMEOUT:
1883 		/*
1884 		 * If the device hasn't gone away, honor retry counts.
1885 		 *
1886 		 * Note that if we're in the middle of probing it, it
1887 		 * won't be found because it isn't here yet, so we won't
1888 		 * honor the retry count in that case.
1889 		 */
1890 		if (scsipi_lookup_periph(chan, periph->periph_target,
1891 		    periph->periph_lun) && xs->xs_retries != 0) {
1892 			xs->xs_retries--;
1893 			error = ERESTART;
1894 		} else
1895 			error = EIO;
1896 		break;
1897 
1898 	case XS_RESET:
1899 		if (xs->xs_control & XS_CTL_REQSENSE) {
1900 			/*
1901 			 * The request sense was interrupted by a reset:
1902 			 * signal it with an EINTR return code.
1903 			 */
1904 			error = EINTR;
1905 		} else {
1906 			if (xs->xs_retries != 0) {
1907 				xs->xs_retries--;
1908 				error = ERESTART;
1909 			} else
1910 				error = EIO;
1911 		}
1912 		break;
1913 
1914 	case XS_DRIVER_STUFFUP:
1915 		scsipi_printaddr(periph);
1916 		printf("generic HBA error\n");
1917 		error = EIO;
1918 		break;
1919 	default:
1920 		scsipi_printaddr(periph);
1921 		printf("invalid return code from adapter: %d\n", xs->error);
1922 		error = EIO;
1923 		break;
1924 	}
1925 
1926 	mutex_enter(chan_mtx(chan));
1927 	if (error == ERESTART) {
1928 		SDT_PROBE1(scsi, base, xfer, restart,  xs);
1929 		/*
1930 		 * If we get here, the periph has been thawed and frozen
1931 		 * again if we had to issue recovery commands.  Alternatively,
1932 		 * it may have been frozen again and in a timed thaw.  In
1933 		 * any case, we thaw the periph once we re-enqueue the
1934 		 * command.  Once the periph is fully thawed, it will begin
1935 		 * operation again.
1936 		 */
1937 		xs->error = XS_NOERROR;
1938 		xs->status = SCSI_OK;
1939 		xs->xs_status &= ~XS_STS_DONE;
1940 		xs->xs_requeuecnt++;
1941 		error = scsipi_enqueue(xs);
1942 		if (error == 0) {
1943 			scsipi_periph_thaw_locked(periph, 1);
1944 			mutex_exit(chan_mtx(chan));
1945 			return ERESTART;
1946 		}
1947 	}
1948 
1949 	/*
1950 	 * scsipi_done() freezes the queue if not XS_NOERROR.
1951 	 * Thaw it here.
1952 	 */
1953 	if (xs->error != XS_NOERROR)
1954 		scsipi_periph_thaw_locked(periph, 1);
1955 	mutex_exit(chan_mtx(chan));
1956 
1957 	if (periph->periph_switch->psw_done)
1958 		periph->periph_switch->psw_done(xs, error);
1959 
1960 	mutex_enter(chan_mtx(chan));
1961 	if (xs->xs_control & XS_CTL_ASYNC)
1962 		scsipi_put_xs(xs);
1963 	mutex_exit(chan_mtx(chan));
1964 
1965 	return error;
1966 }
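/*
 * Summary of the xs->error -> errno mapping above (informational,
 * derived from the switch statement):
 *
 *	XS_NOERROR			-> 0
 *	XS_SENSE/XS_SHORTSENSE		-> bustype_interpret_sense()
 *	XS_RESOURCE_SHORTAGE/XS_BUSY	-> ERESTART while retries remain,
 *					   else EBUSY; SCSI_QUEUE_FULL
 *					   reduces openings, then ERESTART
 *	XS_REQUEUE			-> ERESTART
 *	XS_SELTIMEOUT/XS_TIMEOUT	-> ERESTART while the periph still
 *					   exists and retries remain,
 *					   else EIO
 *	XS_RESET			-> EINTR for a REQUEST SENSE,
 *					   else ERESTART/EIO as above
 *	XS_DRIVER_STUFFUP/default	-> EIO
 *
 * ERESTART is turned into a re-enqueue of the xfer where possible.
 */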
1967 
1968 /*
1969  * scsipi_request_sense:
1970  *
1971  *	Issue a REQUEST SENSE for the given scsipi_xfer, which terminated
1972  *	with a CHECK_CONDITION status. Must be called in valid thread context.
1973  */
1974 static void
1975 scsipi_request_sense(struct scsipi_xfer *xs)
1976 {
1977 	struct scsipi_periph *periph = xs->xs_periph;
1978 	int flags, error;
1979 	struct scsi_request_sense cmd;
1980 
1981 	periph->periph_flags |= PERIPH_SENSE;
1982 
1983 	/* if command was polling, request sense will too */
1984 	flags = xs->xs_control & XS_CTL_POLL;
1985 	/* Polling commands can't sleep */
1986 	if (flags)
1987 		flags |= XS_CTL_NOSLEEP;
1988 
1989 	flags |= XS_CTL_REQSENSE | XS_CTL_URGENT | XS_CTL_DATA_IN |
1990 	    XS_CTL_THAW_PERIPH | XS_CTL_FREEZE_PERIPH;
1991 
1992 	memset(&cmd, 0, sizeof(cmd));
1993 	cmd.opcode = SCSI_REQUEST_SENSE;
1994 	cmd.length = sizeof(struct scsi_sense_data);
1995 
1996 	error = scsipi_command(periph, (void *)&cmd, sizeof(cmd),
1997 	    (void *)&xs->sense.scsi_sense, sizeof(struct scsi_sense_data),
1998 	    0, 1000, NULL, flags);
1999 	periph->periph_flags &= ~PERIPH_SENSE;
2000 	periph->periph_xscheck = NULL;
2001 	switch (error) {
2002 	case 0:
2003 		/* we have a valid sense */
2004 		xs->error = XS_SENSE;
2005 		return;
2006 	case EINTR:
2007 		/* REQUEST_SENSE interrupted by bus reset. */
2008 		xs->error = XS_RESET;
2009 		return;
2010 	case EIO:
2011 		/*
2012 		 * The request sense couldn't be performed.
2013 		 * XXX this isn't quite right, but we don't have anything
2014 		 * better for now.
2015 		 */
2016 		xs->error = XS_DRIVER_STUFFUP;
2017 		return;
2018 	default:
2019 		/* Notify that the request sense failed. */
2020 		xs->error = XS_DRIVER_STUFFUP;
2021 		scsipi_printaddr(periph);
2022 		printf("request sense failed with error %d\n", error);
2023 		return;
2024 	}
2025 }
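/*
 * Note (informational): the CDB built above is the standard 6-byte
 * REQUEST SENSE command -- opcode 0x03, the allocation length set to
 * sizeof(struct scsi_sense_data), and all other fields zero:
 *
 *	03 00 00 00 <alloc len> 00
 */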
2026 
2027 /*
2028  * scsipi_enqueue:
2029  *
2030  *	Enqueue an xfer on a channel.
2031  */
2032 static int
2033 scsipi_enqueue(struct scsipi_xfer *xs)
2034 {
2035 	struct scsipi_channel *chan = xs->xs_periph->periph_channel;
2036 	struct scsipi_xfer *qxs;
2037 
2038 	SDT_PROBE1(scsi, base, xfer, enqueue,  xs);
2039 
2040 	/*
2041 	 * If the xfer is to be polled, and there are already jobs on
2042 	 * the queue, we can't proceed.
2043 	 */
2044 	KASSERT(mutex_owned(chan_mtx(chan)));
2045 	if ((xs->xs_control & XS_CTL_POLL) != 0 &&
2046 	    TAILQ_FIRST(&chan->chan_queue) != NULL) {
2047 		xs->error = XS_DRIVER_STUFFUP;
2048 		return EAGAIN;
2049 	}
2050 
2051 	/*
2052 	 * If we have an URGENT xfer, it's an error recovery command
2053 	 * and it should just go on the head of the channel's queue.
2054 	 */
2055 	if (xs->xs_control & XS_CTL_URGENT) {
2056 		TAILQ_INSERT_HEAD(&chan->chan_queue, xs, channel_q);
2057 		goto out;
2058 	}
2059 
2060 	/*
2061 	 * If this xfer has already been on the queue before, we
2062 	 * need to reinsert it in the correct order.  That order is:
2063 	 *
2064 	 *	Immediately before the first xfer for this periph
2065 	 *	with a requeuecnt less than xs->xs_requeuecnt.
2066 	 *
2067 	 * Failing that, at the end of the queue.  (We'll end up
2068 	 * there naturally.)
2069 	 */
2070 	if (xs->xs_requeuecnt != 0) {
2071 		for (qxs = TAILQ_FIRST(&chan->chan_queue); qxs != NULL;
2072 		     qxs = TAILQ_NEXT(qxs, channel_q)) {
2073 			if (qxs->xs_periph == xs->xs_periph &&
2074 			    qxs->xs_requeuecnt < xs->xs_requeuecnt)
2075 				break;
2076 		}
2077 		if (qxs != NULL) {
2078 			TAILQ_INSERT_AFTER(&chan->chan_queue, qxs, xs,
2079 			    channel_q);
2080 			goto out;
2081 		}
2082 	}
2083 	TAILQ_INSERT_TAIL(&chan->chan_queue, xs, channel_q);
2084  out:
2085 	if (xs->xs_control & XS_CTL_THAW_PERIPH)
2086 		scsipi_periph_thaw_locked(xs->xs_periph, 1);
2087 	return 0;
2088 }
2089 
2090 /*
2091  * scsipi_run_queue:
2092  *
2093  *	Start as many xfers as possible running on the channel.
2094  */
2095 static void
2096 scsipi_run_queue(struct scsipi_channel *chan)
2097 {
2098 	struct scsipi_xfer *xs;
2099 	struct scsipi_periph *periph;
2100 
2101 	SDT_PROBE1(scsi, base, queue, batch__start,  chan);
2102 	for (;;) {
2103 		mutex_enter(chan_mtx(chan));
2104 
2105 		/*
2106 		 * If the channel is frozen, we can't do any work right
2107 		 * now.
2108 		 */
2109 		if (chan->chan_qfreeze != 0) {
2110 			mutex_exit(chan_mtx(chan));
2111 			break;
2112 		}
2113 
2114 		/*
2115 		 * Look for work to do, and make sure we can do it.
2116 		 */
2117 		for (xs = TAILQ_FIRST(&chan->chan_queue); xs != NULL;
2118 		     xs = TAILQ_NEXT(xs, channel_q)) {
2119 			periph = xs->xs_periph;
2120 
2121 			if ((periph->periph_sent >= periph->periph_openings) ||
2122 			    periph->periph_qfreeze != 0 ||
2123 			    (periph->periph_flags & PERIPH_UNTAG) != 0)
2124 				continue;
2125 
2126 			if ((periph->periph_flags &
2127 			    (PERIPH_RECOVERING | PERIPH_SENSE)) != 0 &&
2128 			    (xs->xs_control & XS_CTL_URGENT) == 0)
2129 				continue;
2130 
2131 			/*
2132 			 * We can issue this xfer!
2133 			 */
2134 			goto got_one;
2135 		}
2136 
2137 		/*
2138 		 * Can't find any work to do right now.
2139 		 */
2140 		mutex_exit(chan_mtx(chan));
2141 		break;
2142 
2143  got_one:
2144 		/*
2145 		 * Have an xfer to run.  Allocate a resource from
2146 		 * the adapter to run it.  If we can't allocate that
2147 		 * resource, we don't dequeue the xfer.
2148 		 */
2149 		if (scsipi_get_resource(chan) == 0) {
2150 			/*
2151 			 * Adapter is out of resources.  If the adapter
2152 			 * supports it, attempt to grow them.
2153 			 */
2154 			if (scsipi_grow_resources(chan) == 0) {
2155 				/*
2156 				 * Wasn't able to grow resources,
2157 				 * nothing more we can do.
2158 				 */
2159 				if (xs->xs_control & XS_CTL_POLL) {
2160 					scsipi_printaddr(xs->xs_periph);
2161 					printf("polling command but no "
2162 					    "adapter resources\n");
2163 					/* We'll panic shortly... */
2164 				}
2165 				mutex_exit(chan_mtx(chan));
2166 
2167 				/*
2168 				 * XXX: We should be able to note that
2169 				 * XXX: resources are needed here!
2170 				 */
2171 				break;
2172 			}
2173 			/*
2174 			 * scsipi_grow_resources() allocated the resource
2175 			 * for us.
2176 			 */
2177 		}
2178 
2179 		/*
2180 		 * We have a resource to run this xfer, do it!
2181 		 */
2182 		TAILQ_REMOVE(&chan->chan_queue, xs, channel_q);
2183 
2184 		/*
2185 		 * If the command is to be tagged, allocate a tag ID
2186 		 * for it.
2187 		 */
2188 		if (XS_CTL_TAGTYPE(xs) != 0)
2189 			scsipi_get_tag(xs);
2190 		else
2191 			periph->periph_flags |= PERIPH_UNTAG;
2192 		periph->periph_sent++;
2193 		mutex_exit(chan_mtx(chan));
2194 
2195 		SDT_PROBE2(scsi, base, queue, run,  chan, xs);
2196 		scsipi_adapter_request(chan, ADAPTER_REQ_RUN_XFER, xs);
2197 	}
2198 	SDT_PROBE1(scsi, base, queue, batch__done,  chan);
2199 }
2200 
2201 /*
2202  * scsipi_execute_xs:
2203  *
2204  *	Begin execution of an xfer, waiting for it to complete, if necessary.
2205  */
2206 int
2207 scsipi_execute_xs(struct scsipi_xfer *xs)
2208 {
2209 	struct scsipi_periph *periph = xs->xs_periph;
2210 	struct scsipi_channel *chan = periph->periph_channel;
2211 	int oasync, async, poll, error;
2212 
2213 	KASSERT(!cold);
2214 
2215 	scsipi_update_timeouts(xs);
2216 
2217 	(chan->chan_bustype->bustype_cmd)(xs);
2218 
2219 	xs->xs_status &= ~XS_STS_DONE;
2220 	xs->error = XS_NOERROR;
2221 	xs->resid = xs->datalen;
2222 	xs->status = SCSI_OK;
2223 	SDT_PROBE1(scsi, base, xfer, execute,  xs);
2224 
2225 #ifdef SCSIPI_DEBUG
2226 	if (xs->xs_periph->periph_dbflags & SCSIPI_DB3) {
2227 		printf("scsipi_execute_xs: ");
2228 		show_scsipi_xs(xs);
2229 		printf("\n");
2230 	}
2231 #endif
2232 
2233 	/*
2234 	 * Deal with command tagging:
2235 	 *
2236 	 *	- If the device's current operating mode doesn't
2237 	 *	  include tagged queueing, clear the tag mask.
2238 	 *
2239 	 *	- If the device's current operating mode *does*
2240 	 *	  include tagged queueing, set the tag_type in
2241 	 *	  the xfer to the appropriate byte for the tag
2242 	 *	  message.
2243 	 */
2244 	if ((PERIPH_XFER_MODE(periph) & PERIPH_CAP_TQING) == 0 ||
2245 	    (xs->xs_control & XS_CTL_REQSENSE)) {
2246 		xs->xs_control &= ~XS_CTL_TAGMASK;
2247 		xs->xs_tag_type = 0;
2248 	} else {
2249 		/*
2250 		 * If the request doesn't specify a tag, give Head
2251 		 * tags to URGENT operations and Simple tags to
2252 		 * everything else.
2253 		 */
2254 		if (XS_CTL_TAGTYPE(xs) == 0) {
2255 			if (xs->xs_control & XS_CTL_URGENT)
2256 				xs->xs_control |= XS_CTL_HEAD_TAG;
2257 			else
2258 				xs->xs_control |= XS_CTL_SIMPLE_TAG;
2259 		}
2260 
2261 		switch (XS_CTL_TAGTYPE(xs)) {
2262 		case XS_CTL_ORDERED_TAG:
2263 			xs->xs_tag_type = MSG_ORDERED_Q_TAG;
2264 			break;
2265 
2266 		case XS_CTL_SIMPLE_TAG:
2267 			xs->xs_tag_type = MSG_SIMPLE_Q_TAG;
2268 			break;
2269 
2270 		case XS_CTL_HEAD_TAG:
2271 			xs->xs_tag_type = MSG_HEAD_OF_Q_TAG;
2272 			break;
2273 
2274 		default:
2275 			scsipi_printaddr(periph);
2276 			printf("invalid tag mask 0x%08x\n",
2277 			    XS_CTL_TAGTYPE(xs));
2278 			panic("scsipi_execute_xs");
2279 		}
2280 	}
2281 
2282 	/* If the adapter wants us to poll, poll. */
2283 	if (chan->chan_adapter->adapt_flags & SCSIPI_ADAPT_POLL_ONLY)
2284 		xs->xs_control |= XS_CTL_POLL;
2285 
2286 	/*
2287 	 * If we don't yet have a completion thread, or we are to poll for
2288 	 * completion, clear the ASYNC flag.
2289 	 */
2290 	oasync = (xs->xs_control & XS_CTL_ASYNC);
2291 	if (chan->chan_thread == NULL || (xs->xs_control & XS_CTL_POLL) != 0)
2292 		xs->xs_control &= ~XS_CTL_ASYNC;
2293 
2294 	async = (xs->xs_control & XS_CTL_ASYNC);
2295 	poll = (xs->xs_control & XS_CTL_POLL);
2296 
2297 #ifdef DIAGNOSTIC
2298 	if (oasync != 0 && xs->bp == NULL)
2299 		panic("scsipi_execute_xs: XS_CTL_ASYNC but no buf");
2300 #endif
2301 
2302 	/*
2303 	 * Enqueue the transfer.  If we're not polling for completion, this
2304 	 * should ALWAYS return `no error'.
2305 	 */
2306 	error = scsipi_enqueue(xs);
2307 	if (error) {
2308 		if (poll == 0) {
2309 			scsipi_printaddr(periph);
2310 			printf("not polling, but enqueue failed with %d\n",
2311 			    error);
2312 			panic("scsipi_execute_xs");
2313 		}
2314 
2315 		scsipi_printaddr(periph);
2316 		printf("should have flushed queue?\n");
2317 		goto free_xs;
2318 	}
2319 
2320 	mutex_exit(chan_mtx(chan));
2321  restarted:
2322 	scsipi_run_queue(chan);
2323 	mutex_enter(chan_mtx(chan));
2324 
2325 	/*
2326 	 * The xfer is enqueued, and possibly running.  If it's to be
2327 	 * completed asynchronously, just return now.
2328 	 */
2329 	if (async)
2330 		return 0;
2331 
2332 	/*
2333 	 * Not an asynchronous command; wait for it to complete.
2334 	 */
2335 	while ((xs->xs_status & XS_STS_DONE) == 0) {
2336 		if (poll) {
2337 			scsipi_printaddr(periph);
2338 			printf("polling command not done\n");
2339 			panic("scsipi_execute_xs");
2340 		}
2341 		cv_wait(xs_cv(xs), chan_mtx(chan));
2342 	}
2343 
2344 	/*
2345 	 * Command is complete.  scsipi_done() has awakened us to perform
2346 	 * the error handling.
2347 	 */
2348 	mutex_exit(chan_mtx(chan));
2349 	error = scsipi_complete(xs);
2350 	if (error == ERESTART)
2351 		goto restarted;
2352 
2353 	/*
2354 	 * If it was meant to run async and we cleared async ourselves,
2355 	 * don't return an error here; it has already been handled.
2356 	 */
2357 	if (oasync)
2358 		error = 0;
2359 	/*
2360 	 * Command completed successfully or a fatal error occurred.
2361 	 * Fall into the cleanup below.
2362 	 */
2363 	mutex_enter(chan_mtx(chan));
2364  free_xs:
2365 	scsipi_put_xs(xs);
2366 	mutex_exit(chan_mtx(chan));
2367 
2368 	/*
2369 	 * Kick the queue, keep it running in case it stopped for some
2370 	 * reason.
2371 	 */
2372 	scsipi_run_queue(chan);
2373 
2374 	mutex_enter(chan_mtx(chan));
2375 	return error;
2376 }
2377 
2378 /*
2379  * scsipi_completion_thread:
2380  *
2381  *	This is the completion thread.  We wait for errors on
2382  *	asynchronous xfers, and perform the error handling
2383  *	function, restarting the command, if necessary.
2384  */
2385 static void
2386 scsipi_completion_thread(void *arg)
2387 {
2388 	struct scsipi_channel *chan = arg;
2389 	struct scsipi_xfer *xs;
2390 
2391 	if (chan->chan_init_cb)
2392 		(*chan->chan_init_cb)(chan, chan->chan_init_cb_arg);
2393 
2394 	mutex_enter(chan_mtx(chan));
2395 	chan->chan_flags |= SCSIPI_CHAN_TACTIVE;
2396 	for (;;) {
2397 		xs = TAILQ_FIRST(&chan->chan_complete);
2398 		if (xs == NULL && chan->chan_tflags == 0) {
2399 			/* nothing to do; wait */
2400 			cv_wait(chan_cv_complete(chan), chan_mtx(chan));
2401 			continue;
2402 		}
2403 		if (chan->chan_tflags & SCSIPI_CHANT_CALLBACK) {
2404 			/* call chan_callback from thread context */
2405 			chan->chan_tflags &= ~SCSIPI_CHANT_CALLBACK;
2406 			chan->chan_callback(chan, chan->chan_callback_arg);
2407 			continue;
2408 		}
2409 		if (chan->chan_tflags & SCSIPI_CHANT_GROWRES) {
2410 			/* attempt to get more openings for this channel */
2411 			chan->chan_tflags &= ~SCSIPI_CHANT_GROWRES;
2412 			mutex_exit(chan_mtx(chan));
2413 			scsipi_adapter_request(chan,
2414 			    ADAPTER_REQ_GROW_RESOURCES, NULL);
2415 			scsipi_channel_thaw(chan, 1);
2416 			if (chan->chan_tflags & SCSIPI_CHANT_GROWRES)
2417 				kpause("scsizzz", false, hz/10, NULL);
2418 			mutex_enter(chan_mtx(chan));
2419 			continue;
2420 		}
2421 		if (chan->chan_tflags & SCSIPI_CHANT_KICK) {
2422 			/* explicitly run the queues for this channel */
2423 			chan->chan_tflags &= ~SCSIPI_CHANT_KICK;
2424 			mutex_exit(chan_mtx(chan));
2425 			scsipi_run_queue(chan);
2426 			mutex_enter(chan_mtx(chan));
2427 			continue;
2428 		}
2429 		if (chan->chan_tflags & SCSIPI_CHANT_SHUTDOWN) {
2430 			break;
2431 		}
2432 		if (xs) {
2433 			TAILQ_REMOVE(&chan->chan_complete, xs, channel_q);
2434 			mutex_exit(chan_mtx(chan));
2435 
2436 			/*
2437 			 * Have an xfer with an error; process it.
2438 			 */
2439 			(void) scsipi_complete(xs);
2440 
2441 			/*
2442 			 * Kick the queue; keep it running if it was stopped
2443 			 * for some reason.
2444 			 */
2445 			scsipi_run_queue(chan);
2446 			mutex_enter(chan_mtx(chan));
2447 		}
2448 	}
2449 
2450 	chan->chan_thread = NULL;
2451 
2452 	/* In case parent is waiting for us to exit. */
2453 	cv_broadcast(chan_cv_thread(chan));
2454 	mutex_exit(chan_mtx(chan));
2455 
2456 	kthread_exit(0);
2457 }
2458 /*
2459  * scsipi_thread_call_callback:
2460  *
2461  *	Request that the given callback be run from the completion thread.
2462  */
2463 int
2464 scsipi_thread_call_callback(struct scsipi_channel *chan,
2465     void (*callback)(struct scsipi_channel *, void *), void *arg)
2466 {
2467 
2468 	mutex_enter(chan_mtx(chan));
2469 	if ((chan->chan_flags & SCSIPI_CHAN_TACTIVE) == 0) {
2470 		/* kernel thread doesn't exist yet */
2471 		mutex_exit(chan_mtx(chan));
2472 		return ESRCH;
2473 	}
2474 	if (chan->chan_tflags & SCSIPI_CHANT_CALLBACK) {
2475 		mutex_exit(chan_mtx(chan));
2476 		return EBUSY;
2477 	}
2478 	scsipi_channel_freeze(chan, 1);
2479 	chan->chan_callback = callback;
2480 	chan->chan_callback_arg = arg;
2481 	chan->chan_tflags |= SCSIPI_CHANT_CALLBACK;
2482 	cv_broadcast(chan_cv_complete(chan));
2483 	mutex_exit(chan_mtx(chan));
2484 	return 0;
2485 }
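/*
 * Usage sketch (illustrative, not part of the original source; the
 * "mydrv" names are hypothetical).  Note that scsipi_thread_call_callback()
 * freezes the channel by one and the completion thread does not thaw it,
 * so the callback itself is expected to call scsipi_channel_thaw().
 */
#if 0
static void
mydrv_recover(struct scsipi_channel *chan, void *arg)
{
	struct mydrv_softc *sc = arg;

	mydrv_reset_bus(sc);		/* heavy work, thread context */
	scsipi_channel_thaw(chan, 1);	/* undo the freeze taken for us */
}

static void
mydrv_intr_sched_recovery(struct mydrv_softc *sc)
{
	if (scsipi_thread_call_callback(&sc->sc_channel,
	    mydrv_recover, sc) != 0)
		printf("%s: recovery already pending\n",
		    device_xname(sc->sc_dev));
}
#endif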
2486 
2487 /*
2488  * scsipi_async_event:
2489  *
2490  *	Handle an asynchronous event from an adapter.
2491  */
2492 void
2493 scsipi_async_event(struct scsipi_channel *chan, scsipi_async_event_t event,
2494     void *arg)
2495 {
2496 	bool lock = chan_running(chan) > 0;
2497 
2498 	if (lock)
2499 		mutex_enter(chan_mtx(chan));
2500 	switch (event) {
2501 	case ASYNC_EVENT_MAX_OPENINGS:
2502 		scsipi_async_event_max_openings(chan,
2503 		    (struct scsipi_max_openings *)arg);
2504 		break;
2505 
2506 	case ASYNC_EVENT_XFER_MODE:
2507 		if (chan->chan_bustype->bustype_async_event_xfer_mode) {
2508 			chan->chan_bustype->bustype_async_event_xfer_mode(
2509 			    chan, arg);
2510 		}
2511 		break;
2512 	case ASYNC_EVENT_RESET:
2513 		scsipi_async_event_channel_reset(chan);
2514 		break;
2515 	}
2516 	if (lock)
2517 		mutex_exit(chan_mtx(chan));
2518 }
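/*
 * Usage sketch (illustrative only): how an HBA driver would typically
 * report events to this layer.  The scsipi_xfer_mode fields follow the
 * use in scsipi_set_xfer_mode() below; `chan', `target', `period' and
 * `offset' stand in for the driver's own state.
 */
#if 0
	struct scsipi_xfer_mode xm;

	/* A bus reset was detected by the adapter. */
	scsipi_async_event(chan, ASYNC_EVENT_RESET, NULL);

	/* A transfer-mode negotiation with `target' completed. */
	xm.xm_target = target;
	xm.xm_mode = PERIPH_CAP_SYNC | PERIPH_CAP_TQING;
	xm.xm_period = period;
	xm.xm_offset = offset;
	scsipi_async_event(chan, ASYNC_EVENT_XFER_MODE, &xm);
#endif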
2519 
2520 /*
2521  * scsipi_async_event_max_openings:
2522  *
2523  *	Update the maximum number of outstanding commands a
2524  *	device may have.
2525  */
2526 static void
2527 scsipi_async_event_max_openings(struct scsipi_channel *chan,
2528     struct scsipi_max_openings *mo)
2529 {
2530 	struct scsipi_periph *periph;
2531 	int minlun, maxlun;
2532 
2533 	if (mo->mo_lun == -1) {
2534 		/*
2535 		 * Wildcarded; apply it to all LUNs.
2536 		 */
2537 		minlun = 0;
2538 		maxlun = chan->chan_nluns - 1;
2539 	} else
2540 		minlun = maxlun = mo->mo_lun;
2541 
2542 	/* XXX This could really suck with a large LUN space. */
2543 	for (; minlun <= maxlun; minlun++) {
2544 		periph = scsipi_lookup_periph_locked(chan, mo->mo_target, minlun);
2545 		if (periph == NULL)
2546 			continue;
2547 
2548 		if (mo->mo_openings < periph->periph_openings)
2549 			periph->periph_openings = mo->mo_openings;
2550 		else if (mo->mo_openings > periph->periph_openings &&
2551 		    (periph->periph_flags & PERIPH_GROW_OPENINGS) != 0)
2552 			periph->periph_openings = mo->mo_openings;
2553 	}
2554 }
2555 
2556 /*
2557  * scsipi_set_xfer_mode:
2558  *
2559  *	Set the xfer mode for the specified I_T Nexus.
2560  */
2561 void
2562 scsipi_set_xfer_mode(struct scsipi_channel *chan, int target, int immed)
2563 {
2564 	struct scsipi_xfer_mode xm;
2565 	struct scsipi_periph *itperiph;
2566 	int lun;
2567 
2568 	/*
2569 	 * Go to the minimal xfer mode.
2570 	 */
2571 	xm.xm_target = target;
2572 	xm.xm_mode = 0;
2573 	xm.xm_period = 0;			/* ignored */
2574 	xm.xm_offset = 0;			/* ignored */
2575 
2576 	/*
2577 	 * Find the first LUN we know about on this I_T Nexus.
2578 	 */
2579 	for (itperiph = NULL, lun = 0; lun < chan->chan_nluns; lun++) {
2580 		itperiph = scsipi_lookup_periph(chan, target, lun);
2581 		if (itperiph != NULL)
2582 			break;
2583 	}
2584 	if (itperiph != NULL) {
2585 		xm.xm_mode = itperiph->periph_cap;
2586 		/*
2587 		 * Now issue the request to the adapter.
2588 		 */
2589 		scsipi_adapter_request(chan, ADAPTER_REQ_SET_XFER_MODE, &xm);
2590 		/*
2591 		 * If we want this to happen immediately, issue a dummy
2592 		 * command, since most adapters can't really negotiate unless
2593 		 * they're executing a job.
2594 		 */
2595 		if (immed != 0) {
2596 			(void) scsipi_test_unit_ready(itperiph,
2597 			    XS_CTL_DISCOVERY | XS_CTL_IGNORE_ILLEGAL_REQUEST |
2598 			    XS_CTL_IGNORE_NOT_READY |
2599 			    XS_CTL_IGNORE_MEDIA_CHANGE);
2600 		}
2601 	}
2602 }
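/*
 * Usage sketch (illustrative only): request (re)negotiation of the
 * transfer mode for a target, issuing a dummy command so that it takes
 * effect immediately; `chan' and `periph' stand in for caller state.
 */
#if 0
	scsipi_set_xfer_mode(chan, periph->periph_target, 1);
#endif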
2603 
2604 /*
2605  * scsipi_async_event_channel_reset:
2606  *
2607  *	Handle a SCSI bus reset.
2608  *	Called with the channel lock held.
2609  */
2610 static void
2611 scsipi_async_event_channel_reset(struct scsipi_channel *chan)
2612 {
2613 	struct scsipi_xfer *xs, *xs_next;
2614 	struct scsipi_periph *periph;
2615 	int target, lun;
2616 
2617 	/*
2618 	 * The channel has been reset. Also mark pending REQUEST_SENSE
2619 	 * commands as reset, since their sense data is no longer available.
2620 	 * We can't call scsipi_done() from here, as the command has not
2621 	 * been sent to the adapter yet (that would corrupt the accounting).
2622 	 */
2623 
2624 	for (xs = TAILQ_FIRST(&chan->chan_queue); xs != NULL; xs = xs_next) {
2625 		xs_next = TAILQ_NEXT(xs, channel_q);
2626 		if (xs->xs_control & XS_CTL_REQSENSE) {
2627 			TAILQ_REMOVE(&chan->chan_queue, xs, channel_q);
2628 			xs->error = XS_RESET;
2629 			if ((xs->xs_control & XS_CTL_ASYNC) != 0)
2630 				TAILQ_INSERT_TAIL(&chan->chan_complete, xs,
2631 				    channel_q);
2632 		}
2633 	}
2634 	cv_broadcast(chan_cv_complete(chan));
2635 	/* Catch xfers with pending sense which may not have a REQSENSE xs yet */
2636 	for (target = 0; target < chan->chan_ntargets; target++) {
2637 		if (target == chan->chan_id)
2638 			continue;
2639 		for (lun = 0; lun < chan->chan_nluns; lun++) {
2640 			periph = scsipi_lookup_periph_locked(chan, target, lun);
2641 			if (periph) {
2642 				xs = periph->periph_xscheck;
2643 				if (xs)
2644 					xs->error = XS_RESET;
2645 			}
2646 		}
2647 	}
2648 }
2649 
2650 /*
2651  * scsipi_target_detach:
2652  *
2653  *	Detach all periphs associated with an I_T nexus.
2654  *	Must be called from valid thread context.
2655  */
2656 int
2657 scsipi_target_detach(struct scsipi_channel *chan, int target, int lun,
2658     int flags)
2659 {
2660 	struct scsipi_periph *periph;
2661 	device_t tdev;
2662 	int ctarget, mintarget, maxtarget;
2663 	int clun, minlun, maxlun;
2664 	int error = 0;
2665 
2666 	if (target == -1) {
2667 		mintarget = 0;
2668 		maxtarget = chan->chan_ntargets;
2669 	} else {
2670 		if (target == chan->chan_id)
2671 			return EINVAL;
2672 		if (target < 0 || target >= chan->chan_ntargets)
2673 			return EINVAL;
2674 		mintarget = target;
2675 		maxtarget = target + 1;
2676 	}
2677 
2678 	if (lun == -1) {
2679 		minlun = 0;
2680 		maxlun = chan->chan_nluns;
2681 	} else {
2682 		if (lun < 0 || lun >= chan->chan_nluns)
2683 			return EINVAL;
2684 		minlun = lun;
2685 		maxlun = lun + 1;
2686 	}
2687 
2688 	/* for config_detach */
2689 	KERNEL_LOCK(1, curlwp);
2690 
2691 	mutex_enter(chan_mtx(chan));
2692 	for (ctarget = mintarget; ctarget < maxtarget; ctarget++) {
2693 		if (ctarget == chan->chan_id)
2694 			continue;
2695 
2696 		for (clun = minlun; clun < maxlun; clun++) {
2697 			periph = scsipi_lookup_periph_locked(chan, ctarget, clun);
2698 			if (periph == NULL)
2699 				continue;
2700 			tdev = periph->periph_dev;
2701 			mutex_exit(chan_mtx(chan));
2702 			error = config_detach(tdev, flags);
2703 			if (error)
2704 				goto out;
2705 			mutex_enter(chan_mtx(chan));
2706 			KASSERT(scsipi_lookup_periph_locked(chan, ctarget, clun) == NULL);
2707 		}
2708 	}
2709 	mutex_exit(chan_mtx(chan));
2710 
2711 out:
2712 	KERNEL_UNLOCK_ONE(curlwp);
2713 
2714 	return error;
2715 }
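/*
 * Usage sketch (illustrative only): forcibly detach every LUN behind
 * target 2 of a channel, e.g. on hot-removal.  DETACH_FORCE is the
 * standard autoconf(9) detach flag; `chan' stands in for caller state.
 */
#if 0
	error = scsipi_target_detach(chan, 2, -1, DETACH_FORCE);
#endif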
2716 
2717 /*
2718  * scsipi_adapter_addref:
2719  *
2720  *	Add a reference to the given adapter, enabling the adapter
2721  *	if necessary.
2722  */
2723 int
2724 scsipi_adapter_addref(struct scsipi_adapter *adapt)
2725 {
2726 	int error = 0;
2727 
2728 	if (atomic_inc_uint_nv(&adapt->adapt_refcnt) == 1
2729 	    && adapt->adapt_enable != NULL) {
2730 		scsipi_adapter_lock(adapt);
2731 		error = scsipi_adapter_enable(adapt, 1);
2732 		scsipi_adapter_unlock(adapt);
2733 		if (error)
2734 			atomic_dec_uint(&adapt->adapt_refcnt);
2735 	}
2736 	return error;
2737 }
2738 
2739 /*
2740  * scsipi_adapter_delref:
2741  *
2742  *	Delete a reference to the given adapter, disabling the adapter
2743  *	if possible.
2744  */
2745 void
2746 scsipi_adapter_delref(struct scsipi_adapter *adapt)
2747 {
2748 
2749 	membar_release();
2750 	if (atomic_dec_uint_nv(&adapt->adapt_refcnt) == 0
2751 	    && adapt->adapt_enable != NULL) {
2752 		membar_acquire();
2753 		scsipi_adapter_lock(adapt);
2754 		(void) scsipi_adapter_enable(adapt, 0);
2755 		scsipi_adapter_unlock(adapt);
2756 	}
2757 }
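/*
 * Usage sketch (illustrative only): the reference counting is typically
 * paired in a periph driver's open/close path, so the adapter can be
 * disabled again when the last user goes away.
 */
#if 0
	if ((error = scsipi_adapter_addref(adapt)) != 0)
		return error;
	/* ... issue commands ... */
	scsipi_adapter_delref(adapt);
#endif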
2758 
2759 static struct scsipi_syncparam {
2760 	int	ss_factor;
2761 	int	ss_period;	/* ns * 100 */
2762 } scsipi_syncparams[] = {
2763 	{ 0x08,		 625 },	/* FAST-160 (Ultra320) */
2764 	{ 0x09,		1250 },	/* FAST-80 (Ultra160) */
2765 	{ 0x0a,		2500 },	/* FAST-40 40MHz (Ultra2) */
2766 	{ 0x0b,		3030 },	/* FAST-40 33MHz (Ultra2) */
2767 	{ 0x0c,		5000 },	/* FAST-20 (Ultra) */
2768 };
2769 static const int scsipi_nsyncparams =
2770     sizeof(scsipi_syncparams) / sizeof(scsipi_syncparams[0]);
2771 
2772 int
2773 scsipi_sync_period_to_factor(int period /* ns * 100 */)
2774 {
2775 	int i;
2776 
2777 	for (i = 0; i < scsipi_nsyncparams; i++) {
2778 		if (period <= scsipi_syncparams[i].ss_period)
2779 			return scsipi_syncparams[i].ss_factor;
2780 	}
2781 
2782 	return (period / 100) / 4;
2783 }
2784 
2785 int
2786 scsipi_sync_factor_to_period(int factor)
2787 {
2788 	int i;
2789 
2790 	for (i = 0; i < scsipi_nsyncparams; i++) {
2791 		if (factor == scsipi_syncparams[i].ss_factor)
2792 			return scsipi_syncparams[i].ss_period;
2793 	}
2794 
2795 	return (factor * 4) * 100;
2796 }
2797 
2798 int
2799 scsipi_sync_factor_to_freq(int factor)
2800 {
2801 	int i;
2802 
2803 	for (i = 0; i < scsipi_nsyncparams; i++) {
2804 		if (factor == scsipi_syncparams[i].ss_factor)
2805 			return 100000000 / scsipi_syncparams[i].ss_period;
2806 	}
2807 
2808 	return 10000000 / ((factor * 4) * 10);
2809 }
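/*
 * Worked examples (informational; periods are in units of ns * 100 and
 * frequencies in kHz, per the table and formulas above):
 *
 *	factor 0x0c (FAST-20):	 period 5000 (50 ns), freq 20000 (20 MHz)
 *	factor 0x19 (off-table): period 25 * 4 * 100 = 10000 (100 ns),
 *				 freq 10000 (10 MHz)
 *	period 1000 (10 ns):	 maps to factor 0x09 (FAST-80), the first
 *				 table entry with ss_period >= 1000
 */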
2810 
2811 static inline void
2812 scsipi_adapter_lock(struct scsipi_adapter *adapt)
2813 {
2814 
2815 	if ((adapt->adapt_flags & SCSIPI_ADAPT_MPSAFE) == 0)
2816 		KERNEL_LOCK(1, NULL);
2817 }
2818 
2819 static inline void
2820 scsipi_adapter_unlock(struct scsipi_adapter *adapt)
2821 {
2822 
2823 	if ((adapt->adapt_flags & SCSIPI_ADAPT_MPSAFE) == 0)
2824 		KERNEL_UNLOCK_ONE(NULL);
2825 }
2826 
2827 void
2828 scsipi_adapter_minphys(struct scsipi_channel *chan, struct buf *bp)
2829 {
2830 	struct scsipi_adapter *adapt = chan->chan_adapter;
2831 
2832 	scsipi_adapter_lock(adapt);
2833 	(adapt->adapt_minphys)(bp);
2834 	scsipi_adapter_unlock(adapt);
2835 }
2836 
2837 void
2838 scsipi_adapter_request(struct scsipi_channel *chan,
2839 	scsipi_adapter_req_t req, void *arg)
2841 {
2842 	struct scsipi_adapter *adapt = chan->chan_adapter;
2843 
2844 	scsipi_adapter_lock(adapt);
2845 	SDT_PROBE3(scsi, base, adapter, request__start,  chan, req, arg);
2846 	(adapt->adapt_request)(chan, req, arg);
2847 	SDT_PROBE3(scsi, base, adapter, request__done,  chan, req, arg);
2848 	scsipi_adapter_unlock(adapt);
2849 }
2850 
2851 int
2852 scsipi_adapter_ioctl(struct scsipi_channel *chan, u_long cmd,
2853 	void *data, int flag, struct proc *p)
2854 {
2855 	struct scsipi_adapter *adapt = chan->chan_adapter;
2856 	int error;
2857 
2858 	if (adapt->adapt_ioctl == NULL)
2859 		return ENOTTY;
2860 
2861 	scsipi_adapter_lock(adapt);
2862 	error = (adapt->adapt_ioctl)(chan, cmd, data, flag, p);
2863 	scsipi_adapter_unlock(adapt);
2864 	return error;
2865 }
2866 
2867 int
2868 scsipi_adapter_enable(struct scsipi_adapter *adapt, int enable)
2869 {
2870 	int error;
2871 
2872 	scsipi_adapter_lock(adapt);
2873 	error = (adapt->adapt_enable)(adapt->adapt_dev, enable);
2874 	scsipi_adapter_unlock(adapt);
2875 	return error;
2876 }
2877 
2878 #ifdef SCSIPI_DEBUG
2879 /*
2880  * Given a scsipi_xfer, dump the request, in all its glory
2881  */
2882 void
2883 show_scsipi_xs(struct scsipi_xfer *xs)
2884 {
2885 
2886 	printf("xs(%p): ", xs);
2887 	printf("xs_control(0x%08x)", xs->xs_control);
2888 	printf("xs_status(0x%08x)", xs->xs_status);
2889 	printf("periph(%p)", xs->xs_periph);
2890 	printf("retr(0x%x)", xs->xs_retries);
2891 	printf("timo(0x%x)", xs->timeout);
2892 	printf("cmd(%p)", xs->cmd);
2893 	printf("len(0x%x)", xs->cmdlen);
2894 	printf("data(%p)", xs->data);
2895 	printf("len(0x%x)", xs->datalen);
2896 	printf("res(0x%x)", xs->resid);
2897 	printf("err(0x%x)", xs->error);
2898 	printf("bp(%p)", xs->bp);
2899 	show_scsipi_cmd(xs);
2900 }
2901 
2902 void
2903 show_scsipi_cmd(struct scsipi_xfer *xs)
2904 {
2905 	u_char *b = (u_char *) xs->cmd;
2906 	int i = 0;
2907 
2908 	scsipi_printaddr(xs->xs_periph);
2909 	printf(" command: ");
2910 
2911 	if ((xs->xs_control & XS_CTL_RESET) == 0) {
2912 		while (i < xs->cmdlen) {
2913 			if (i)
2914 				printf(",");
2915 			printf("0x%x", b[i++]);
2916 		}
2917 		printf("-[%d bytes]\n", xs->datalen);
2918 		if (xs->datalen)
2919 			show_mem(xs->data, uimin(64, xs->datalen));
2920 	} else
2921 		printf("-RESET-\n");
2922 }
2923 
2924 void
2925 show_mem(u_char *address, int num)
2926 {
2927 	int x;
2928 
2929 	printf("------------------------------");
2930 	for (x = 0; x < num; x++) {
2931 		if ((x % 16) == 0)
2932 			printf("\n%03d: ", x);
2933 		printf("%02x ", *address++);
2934 	}
2935 	printf("\n------------------------------\n");
2936 }
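/*
 * Illustrative output of show_scsipi_cmd() for an all-zero 6-byte CDB
 * (TEST UNIT READY) with no data phase; the leading address is whatever
 * scsipi_printaddr() prints for the periph:
 *
 *	<periph address> command: 0x0,0x0,0x0,0x0,0x0,0x0-[0 bytes]
 */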
2937 #endif /* SCSIPI_DEBUG */
2938