xref: /netbsd-src/sys/dev/scsipi/scsipi_base.c (revision 404ee5b9334f618040b6cdef96a0ff35a6fc4636)
1 /*	$NetBSD: scsipi_base.c,v 1.184 2019/11/10 21:16:37 chs Exp $	*/
2 
3 /*-
4  * Copyright (c) 1998, 1999, 2000, 2002, 2003, 2004 The NetBSD Foundation, Inc.
5  * All rights reserved.
6  *
7  * This code is derived from software contributed to The NetBSD Foundation
8  * by Charles M. Hannum; by Jason R. Thorpe of the Numerical Aerospace
9  * Simulation Facility, NASA Ames Research Center.
10  *
11  * Redistribution and use in source and binary forms, with or without
12  * modification, are permitted provided that the following conditions
13  * are met:
14  * 1. Redistributions of source code must retain the above copyright
15  *    notice, this list of conditions and the following disclaimer.
16  * 2. Redistributions in binary form must reproduce the above copyright
17  *    notice, this list of conditions and the following disclaimer in the
18  *    documentation and/or other materials provided with the distribution.
19  *
20  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
21  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
22  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
23  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
24  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
30  * POSSIBILITY OF SUCH DAMAGE.
31  */
32 
33 #include <sys/cdefs.h>
34 __KERNEL_RCSID(0, "$NetBSD: scsipi_base.c,v 1.184 2019/11/10 21:16:37 chs Exp $");
35 
36 #ifdef _KERNEL_OPT
37 #include "opt_scsi.h"
38 #endif
39 
40 #include <sys/param.h>
41 #include <sys/systm.h>
42 #include <sys/kernel.h>
43 #include <sys/buf.h>
44 #include <sys/uio.h>
45 #include <sys/malloc.h>
46 #include <sys/pool.h>
47 #include <sys/errno.h>
48 #include <sys/device.h>
49 #include <sys/proc.h>
50 #include <sys/kthread.h>
51 #include <sys/hash.h>
52 #include <sys/atomic.h>
53 
54 #include <dev/scsipi/scsi_spc.h>
55 #include <dev/scsipi/scsipi_all.h>
56 #include <dev/scsipi/scsipi_disk.h>
57 #include <dev/scsipi/scsipiconf.h>
58 #include <dev/scsipi/scsipi_base.h>
59 
60 #include <dev/scsipi/scsi_all.h>
61 #include <dev/scsipi/scsi_message.h>
62 
63 #include <machine/param.h>
64 
65 static int	scsipi_complete(struct scsipi_xfer *);
66 static void	scsipi_request_sense(struct scsipi_xfer *);
67 static int	scsipi_enqueue(struct scsipi_xfer *);
68 static void	scsipi_run_queue(struct scsipi_channel *chan);
69 
70 static void	scsipi_completion_thread(void *);
71 
72 static void	scsipi_get_tag(struct scsipi_xfer *);
73 static void	scsipi_put_tag(struct scsipi_xfer *);
74 
75 static int	scsipi_get_resource(struct scsipi_channel *);
76 static void	scsipi_put_resource(struct scsipi_channel *);
77 
78 static void	scsipi_async_event_max_openings(struct scsipi_channel *,
79 		    struct scsipi_max_openings *);
80 static void	scsipi_async_event_channel_reset(struct scsipi_channel *);
81 
82 static void	scsipi_channel_freeze_locked(struct scsipi_channel *, int);
83 
84 static void	scsipi_adapter_lock(struct scsipi_adapter *adapt);
85 static void	scsipi_adapter_unlock(struct scsipi_adapter *adapt);
86 
87 static void	scsipi_update_timeouts(struct scsipi_xfer *xs);
88 
89 static struct pool scsipi_xfer_pool;
90 
91 int scsipi_xs_count = 0;
92 
93 /*
94  * scsipi_init:
95  *
96  *	Called when a scsibus or atapibus is attached to the system
97  *	to initialize shared data structures.
98  */
99 void
100 scsipi_init(void)
101 {
102 	static int scsipi_init_done;
103 
104 	if (scsipi_init_done)
105 		return;
106 	scsipi_init_done = 1;
107 
108 	/* Initialize the scsipi_xfer pool. */
109 	pool_init(&scsipi_xfer_pool, sizeof(struct scsipi_xfer), 0,
110 	    0, 0, "scxspl", NULL, IPL_BIO);
111 	if (pool_prime(&scsipi_xfer_pool,
112 	    PAGE_SIZE / sizeof(struct scsipi_xfer)) == ENOMEM) {
113 		printf("WARNING: not enough memory for scsipi_xfer_pool\n");
114 	}
115 
116 	scsipi_ioctl_init();
117 }
118 
119 /*
120  * scsipi_channel_init:
121  *
122  *	Initialize a scsipi_channel when it is attached.
123  */
124 int
125 scsipi_channel_init(struct scsipi_channel *chan)
126 {
127 	struct scsipi_adapter *adapt = chan->chan_adapter;
128 	int i;
129 
130 	/* Initialize shared data. */
131 	scsipi_init();
132 
133 	/* Initialize the queues. */
134 	TAILQ_INIT(&chan->chan_queue);
135 	TAILQ_INIT(&chan->chan_complete);
136 
137 	for (i = 0; i < SCSIPI_CHAN_PERIPH_BUCKETS; i++)
138 		LIST_INIT(&chan->chan_periphtab[i]);
139 
140 	/*
141 	 * Create the asynchronous completion thread.
142 	 */
143 	if (kthread_create(PRI_NONE, 0, NULL, scsipi_completion_thread, chan,
144 	    &chan->chan_thread, "%s", chan->chan_name)) {
145 		aprint_error_dev(adapt->adapt_dev, "unable to create completion thread for "
146 		    "channel %d\n", chan->chan_channel);
147 		panic("scsipi_channel_init");
148 	}
149 
150 	return 0;
151 }
152 
153 /*
154  * scsipi_channel_shutdown:
155  *
156  *	Shutdown a scsipi_channel.
157  */
158 void
159 scsipi_channel_shutdown(struct scsipi_channel *chan)
160 {
161 
162 	mutex_enter(chan_mtx(chan));
163 	/*
164 	 * Shut down the completion thread.
165 	 */
166 	chan->chan_tflags |= SCSIPI_CHANT_SHUTDOWN;
167 	cv_broadcast(chan_cv_complete(chan));
168 
169 	/*
170 	 * Now wait for the thread to exit.
171 	 */
172 	while (chan->chan_thread != NULL)
173 		cv_wait(chan_cv_thread(chan), chan_mtx(chan));
174 	mutex_exit(chan_mtx(chan));
175 }
176 
177 static uint32_t
178 scsipi_chan_periph_hash(uint64_t t, uint64_t l)
179 {
180 	uint32_t hash;
181 
182 	hash = hash32_buf(&t, sizeof(t), HASH32_BUF_INIT);
183 	hash = hash32_buf(&l, sizeof(l), hash);
184 
185 	return hash & SCSIPI_CHAN_PERIPH_HASHMASK;
186 }
187 
188 /*
189  * scsipi_insert_periph:
190  *
191  *	Insert a periph into the channel.
192  */
193 void
194 scsipi_insert_periph(struct scsipi_channel *chan, struct scsipi_periph *periph)
195 {
196 	uint32_t hash;
197 
198 	hash = scsipi_chan_periph_hash(periph->periph_target,
199 	    periph->periph_lun);
200 
201 	mutex_enter(chan_mtx(chan));
202 	LIST_INSERT_HEAD(&chan->chan_periphtab[hash], periph, periph_hash);
203 	mutex_exit(chan_mtx(chan));
204 }
205 
206 /*
207  * scsipi_remove_periph:
208  *
209  *	Remove a periph from the channel.
210  */
211 void
212 scsipi_remove_periph(struct scsipi_channel *chan,
213     struct scsipi_periph *periph)
214 {
215 
216 	LIST_REMOVE(periph, periph_hash);
217 }
218 
219 /*
220  * scsipi_lookup_periph:
221  *
222  *	Look up a periph on the specified channel.
223  */
224 static struct scsipi_periph *
225 scsipi_lookup_periph_internal(struct scsipi_channel *chan, int target, int lun, bool lock)
226 {
227 	struct scsipi_periph *periph;
228 	uint32_t hash;
229 
230 	if (target >= chan->chan_ntargets ||
231 	    lun >= chan->chan_nluns)
232 		return NULL;
233 
234 	hash = scsipi_chan_periph_hash(target, lun);
235 
236 	if (lock)
237 		mutex_enter(chan_mtx(chan));
238 	LIST_FOREACH(periph, &chan->chan_periphtab[hash], periph_hash) {
239 		if (periph->periph_target == target &&
240 		    periph->periph_lun == lun)
241 			break;
242 	}
243 	if (lock)
244 		mutex_exit(chan_mtx(chan));
245 
246 	return periph;
247 }
248 
249 struct scsipi_periph *
250 scsipi_lookup_periph_locked(struct scsipi_channel *chan, int target, int lun)
251 {
252 	return scsipi_lookup_periph_internal(chan, target, lun, false);
253 }
254 
255 struct scsipi_periph *
256 scsipi_lookup_periph(struct scsipi_channel *chan, int target, int lun)
257 {
258 	return scsipi_lookup_periph_internal(chan, target, lun, true);
259 }
260 
261 /*
262  * scsipi_get_resource:
263  *
264  *	Allocate a single xfer `resource' from the channel.
265  *
266  *	NOTE: Must be called with channel lock held
267  */
268 static int
269 scsipi_get_resource(struct scsipi_channel *chan)
270 {
271 	struct scsipi_adapter *adapt = chan->chan_adapter;
272 
273 	if (chan->chan_flags & SCSIPI_CHAN_OPENINGS) {
274 		if (chan->chan_openings > 0) {
275 			chan->chan_openings--;
276 			return 1;
277 		}
278 		return 0;
279 	}
280 
281 	if (adapt->adapt_openings > 0) {
282 		adapt->adapt_openings--;
283 		return 1;
284 	}
285 	return 0;
286 }
287 
288 /*
289  * scsipi_grow_resources:
290  *
291  *	Attempt to grow resources for a channel.  If this succeeds,
292  *	we allocate one for our caller.
293  *
294  *	NOTE: Must be called with channel lock held
295  */
296 static inline int
297 scsipi_grow_resources(struct scsipi_channel *chan)
298 {
299 
300 	if (chan->chan_flags & SCSIPI_CHAN_CANGROW) {
301 		if ((chan->chan_flags & SCSIPI_CHAN_TACTIVE) == 0) {
302 			mutex_exit(chan_mtx(chan));
303 			scsipi_adapter_request(chan,
304 			    ADAPTER_REQ_GROW_RESOURCES, NULL);
305 			mutex_enter(chan_mtx(chan));
306 			return scsipi_get_resource(chan);
307 		}
308 		/*
309 		 * Ask the channel thread to do it; it will have to thaw the
310 		 * queue.
311 		 */
312 		scsipi_channel_freeze_locked(chan, 1);
313 		chan->chan_tflags |= SCSIPI_CHANT_GROWRES;
314 		cv_broadcast(chan_cv_complete(chan));
315 		return 0;
316 	}
317 
318 	return 0;
319 }
320 
321 /*
322  * scsipi_put_resource:
323  *
324  *	Free a single xfer `resource' to the channel.
325  *
326  *	NOTE: Must be called with channel lock held
327  */
328 static void
329 scsipi_put_resource(struct scsipi_channel *chan)
330 {
331 	struct scsipi_adapter *adapt = chan->chan_adapter;
332 
333 	if (chan->chan_flags & SCSIPI_CHAN_OPENINGS)
334 		chan->chan_openings++;
335 	else
336 		adapt->adapt_openings++;
337 }
338 
339 /*
340  * scsipi_get_tag:
341  *
342  *	Get a tag ID for the specified xfer.
343  *
344  *	NOTE: Must be called with channel lock held
345  */
346 static void
347 scsipi_get_tag(struct scsipi_xfer *xs)
348 {
349 	struct scsipi_periph *periph = xs->xs_periph;
350 	int bit, tag;
351 	u_int word;
352 
353 	KASSERT(mutex_owned(chan_mtx(periph->periph_channel)));
354 
355 	bit = 0;	/* XXX gcc */
356 	for (word = 0; word < PERIPH_NTAGWORDS; word++) {
357 		bit = ffs(periph->periph_freetags[word]);
358 		if (bit != 0)
359 			break;
360 	}
361 #ifdef DIAGNOSTIC
362 	if (word == PERIPH_NTAGWORDS) {
363 		scsipi_printaddr(periph);
364 		printf("no free tags\n");
365 		panic("scsipi_get_tag");
366 	}
367 #endif
368 
369 	bit -= 1;
370 	periph->periph_freetags[word] &= ~(1U << bit);
371 	tag = (word << 5) | bit;
372 
373 	/* XXX Should eventually disallow this completely. */
374 	if (tag >= periph->periph_openings) {
375 		scsipi_printaddr(periph);
376 		printf("WARNING: tag %d greater than available openings %d\n",
377 		    tag, periph->periph_openings);
378 	}
379 
380 	xs->xs_tag_id = tag;
381 }
382 
383 /*
384  * scsipi_put_tag:
385  *
386  *	Put the tag ID for the specified xfer back into the pool.
387  *
388  *	NOTE: Must be called with channel lock held
389  */
390 static void
391 scsipi_put_tag(struct scsipi_xfer *xs)
392 {
393 	struct scsipi_periph *periph = xs->xs_periph;
394 	int word, bit;
395 
396 	KASSERT(mutex_owned(chan_mtx(periph->periph_channel)));
397 
398 	word = xs->xs_tag_id >> 5;
399 	bit = xs->xs_tag_id & 0x1f;
400 
401 	periph->periph_freetags[word] |= (1U << bit);
402 }
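
/*
 * Illustrative sketch of the tag encoding used above (the numbers are
 * examples only): a tag is an index into the periph_freetags[] bitmap,
 * 32 tags per word.  For example, tag 37:
 *
 *	word = 37 >> 5;			== 1
 *	bit  = 37 & 0x1f;		== 5
 *	tag  = (word << 5) | bit;	== 37
 */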
403 
404 /*
405  * scsipi_get_xs:
406  *
407  *	Allocate an xfer descriptor and associate it with the
408  *	specified peripheral.  If the peripheral has no more
409  *	available command openings, we either block waiting for
410  *	one to become available, or fail.
411  *
412  *	When this routine is called with the channel lock held
413  *	the flags must include XS_CTL_NOSLEEP.
414  */
415 struct scsipi_xfer *
416 scsipi_get_xs(struct scsipi_periph *periph, int flags)
417 {
418 	struct scsipi_xfer *xs;
419 	bool lock = (flags & XS_CTL_NOSLEEP) == 0;
420 
421 	SC_DEBUG(periph, SCSIPI_DB3, ("scsipi_get_xs\n"));
422 
423 	KASSERT(!cold);
424 
425 #ifdef DIAGNOSTIC
426 	/*
427 	 * URGENT commands can never be ASYNC.
428 	 */
429 	if ((flags & (XS_CTL_URGENT|XS_CTL_ASYNC)) ==
430 	    (XS_CTL_URGENT|XS_CTL_ASYNC)) {
431 		scsipi_printaddr(periph);
432 		printf("URGENT and ASYNC\n");
433 		panic("scsipi_get_xs");
434 	}
435 #endif
436 
437 	/*
438 	 * Wait for a command opening to become available.  Rules:
439 	 *
440 	 *	- All xfers must wait for an available opening.
441 	 *	  Exception: URGENT xfers can proceed when
442 	 *	  active == openings, because we use the opening
443 	 *	  of the command we're recovering for.
444 	 *	- If the periph has sense pending, only URGENT & REQSENSE
445 	 *	  xfers may proceed.
446 	 *
447 	 *	- If the periph is recovering, only URGENT xfers may
448 	 *	  proceed.
449 	 *
450 	 *	- If the periph is currently executing a recovery
451 	 *	  command, URGENT commands must block, because only
452 	 *	  one recovery command can execute at a time.
453 	 */
454 	if (lock)
455 		mutex_enter(chan_mtx(periph->periph_channel));
456 	for (;;) {
457 		if (flags & XS_CTL_URGENT) {
458 			if (periph->periph_active > periph->periph_openings)
459 				goto wait_for_opening;
460 			if (periph->periph_flags & PERIPH_SENSE) {
461 				if ((flags & XS_CTL_REQSENSE) == 0)
462 					goto wait_for_opening;
463 			} else {
464 				if ((periph->periph_flags &
465 				    PERIPH_RECOVERY_ACTIVE) != 0)
466 					goto wait_for_opening;
467 				periph->periph_flags |= PERIPH_RECOVERY_ACTIVE;
468 			}
469 			break;
470 		}
471 		if (periph->periph_active >= periph->periph_openings ||
472 		    (periph->periph_flags & PERIPH_RECOVERING) != 0)
473 			goto wait_for_opening;
474 		periph->periph_active++;
475 		KASSERT(mutex_owned(chan_mtx(periph->periph_channel)));
476 		break;
477 
478  wait_for_opening:
479 		if (flags & XS_CTL_NOSLEEP) {
480 			KASSERT(!lock);
481 			return NULL;
482 		}
483 		KASSERT(lock);
484 		SC_DEBUG(periph, SCSIPI_DB3, ("sleeping\n"));
485 		periph->periph_flags |= PERIPH_WAITING;
486 		cv_wait(periph_cv_periph(periph),
487 		    chan_mtx(periph->periph_channel));
488 	}
489 	if (lock)
490 		mutex_exit(chan_mtx(periph->periph_channel));
491 
492 	SC_DEBUG(periph, SCSIPI_DB3, ("calling pool_get\n"));
493 	xs = pool_get(&scsipi_xfer_pool,
494 	    ((flags & XS_CTL_NOSLEEP) != 0 ? PR_NOWAIT : PR_WAITOK));
495 	if (xs == NULL) {
496 		if (lock)
497 			mutex_enter(chan_mtx(periph->periph_channel));
498 		if (flags & XS_CTL_URGENT) {
499 			if ((flags & XS_CTL_REQSENSE) == 0)
500 				periph->periph_flags &= ~PERIPH_RECOVERY_ACTIVE;
501 		} else
502 			periph->periph_active--;
503 		if (lock)
504 			mutex_exit(chan_mtx(periph->periph_channel));
505 		scsipi_printaddr(periph);
506 		printf("unable to allocate %sscsipi_xfer\n",
507 		    (flags & XS_CTL_URGENT) ? "URGENT " : "");
508 	}
509 
510 	SC_DEBUG(periph, SCSIPI_DB3, ("returning\n"));
511 
512 	if (xs != NULL) {
513 		memset(xs, 0, sizeof(*xs));
514 		callout_init(&xs->xs_callout, 0);
515 		xs->xs_periph = periph;
516 		xs->xs_control = flags;
517 		xs->xs_status = 0;
518 		if ((flags & XS_CTL_NOSLEEP) == 0)
519 			mutex_enter(chan_mtx(periph->periph_channel));
520 		TAILQ_INSERT_TAIL(&periph->periph_xferq, xs, device_q);
521 		KASSERT(mutex_owned(chan_mtx(periph->periph_channel)));
522 		if ((flags & XS_CTL_NOSLEEP) == 0)
523 			mutex_exit(chan_mtx(periph->periph_channel));
524 	}
525 	return xs;
526 }
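
/*
 * Usage sketch (illustrative, not a verbatim caller): code running in
 * thread context may sleep for an opening, while interrupt-context
 * callers must hold the channel lock, pass XS_CTL_NOSLEEP, and handle
 * a NULL return:
 *
 *	xs = scsipi_get_xs(periph, XS_CTL_DATA_IN);	(may sleep)
 *
 *	xs = scsipi_get_xs(periph, flags | XS_CTL_NOSLEEP);
 *	if (xs == NULL)
 *		(defer or fail the request)
 */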
527 
528 /*
529  * scsipi_put_xs:
530  *
531  *	Release an xfer descriptor, decreasing the outstanding command
532  *	count for the peripheral.  If there is a thread waiting for
533  *	an opening, wake it up.  If not, kick any queued I/O the
534  *	peripheral may have.
535  *
536  *	NOTE: Must be called with channel lock held
537  */
538 void
539 scsipi_put_xs(struct scsipi_xfer *xs)
540 {
541 	struct scsipi_periph *periph = xs->xs_periph;
542 	int flags = xs->xs_control;
543 
544 	SC_DEBUG(periph, SCSIPI_DB3, ("scsipi_free_xs\n"));
545 	KASSERT(mutex_owned(chan_mtx(periph->periph_channel)));
546 
547 	TAILQ_REMOVE(&periph->periph_xferq, xs, device_q);
548 	callout_destroy(&xs->xs_callout);
549 	pool_put(&scsipi_xfer_pool, xs);
550 
551 #ifdef DIAGNOSTIC
552 	if ((periph->periph_flags & PERIPH_RECOVERY_ACTIVE) != 0 &&
553 	    periph->periph_active == 0) {
554 		scsipi_printaddr(periph);
555 		printf("recovery without a command to recover for\n");
556 		panic("scsipi_put_xs");
557 	}
558 #endif
559 
560 	if (flags & XS_CTL_URGENT) {
561 		if ((flags & XS_CTL_REQSENSE) == 0)
562 			periph->periph_flags &= ~PERIPH_RECOVERY_ACTIVE;
563 	} else
564 		periph->periph_active--;
565 	if (periph->periph_active == 0 &&
566 	    (periph->periph_flags & PERIPH_WAITDRAIN) != 0) {
567 		periph->periph_flags &= ~PERIPH_WAITDRAIN;
568 		cv_broadcast(periph_cv_active(periph));
569 	}
570 
571 	if (periph->periph_flags & PERIPH_WAITING) {
572 		periph->periph_flags &= ~PERIPH_WAITING;
573 		cv_broadcast(periph_cv_periph(periph));
574 	} else {
575 		if (periph->periph_switch->psw_start != NULL &&
576 		    device_is_active(periph->periph_dev)) {
577 			SC_DEBUG(periph, SCSIPI_DB2,
578 			    ("calling private start()\n"));
579 			(*periph->periph_switch->psw_start)(periph);
580 		}
581 	}
582 }
583 
584 /*
585  * scsipi_channel_freeze:
586  *
587  *	Freeze a channel's xfer queue.
588  */
589 void
590 scsipi_channel_freeze(struct scsipi_channel *chan, int count)
591 {
592 	bool lock = chan_running(chan) > 0;
593 
594 	if (lock)
595 		mutex_enter(chan_mtx(chan));
596 	chan->chan_qfreeze += count;
597 	if (lock)
598 		mutex_exit(chan_mtx(chan));
599 }
600 
601 static void
602 scsipi_channel_freeze_locked(struct scsipi_channel *chan, int count)
603 {
604 
605 	chan->chan_qfreeze += count;
606 }
607 
608 /*
609  * scsipi_channel_thaw:
610  *
611  *	Thaw a channel's xfer queue.
612  */
613 void
614 scsipi_channel_thaw(struct scsipi_channel *chan, int count)
615 {
616 	bool lock = chan_running(chan) > 0;
617 
618 	if (lock)
619 		mutex_enter(chan_mtx(chan));
620 	chan->chan_qfreeze -= count;
621 	/*
622 	 * Don't let the freeze count go negative.
623 	 *
624 	 * Presumably the adapter driver could keep track of this,
625 	 * but it might just be easier to do this here so as to allow
626 	 * multiple callers, including those outside the adapter driver.
627 	 */
628 	if (chan->chan_qfreeze < 0) {
629 		chan->chan_qfreeze = 0;
630 	}
631 	if (lock)
632 		mutex_exit(chan_mtx(chan));
633 
634 	/*
635 	 * Nothing else to do until the channel is running.
636 	 */
637 	if (!lock)
638 		return;
639 
640 	/*
641 	 * Kick the channel's queue here.  Note, we may be running in
642 	 * interrupt context (softclock or HBA's interrupt), so the adapter
643 	 * driver had better not sleep.
644 	 */
645 	if (chan->chan_qfreeze == 0)
646 		scsipi_run_queue(chan);
647 }
648 
649 /*
650  * scsipi_channel_timed_thaw:
651  *
652  *	Thaw a channel after some time has expired. This will also
653  * 	run the channel's queue if the freeze count has reached 0.
654  */
655 void
656 scsipi_channel_timed_thaw(void *arg)
657 {
658 	struct scsipi_channel *chan = arg;
659 
660 	scsipi_channel_thaw(chan, 1);
661 }
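
/*
 * Note that the freeze count is a counter, not a flag: for example,
 * after scsipi_channel_freeze(chan, 2) the queue stays frozen through
 * the first scsipi_channel_thaw(chan, 1) and only restarts when a
 * second thaw brings chan_qfreeze back to 0.
 */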
662 
663 /*
664  * scsipi_periph_freeze:
665  *
666  *	Freeze a device's xfer queue.
667  */
668 void
669 scsipi_periph_freeze_locked(struct scsipi_periph *periph, int count)
670 {
671 
672 	periph->periph_qfreeze += count;
673 }
674 
675 /*
676  * scsipi_periph_thaw:
677  *
678  *	Thaw a device's xfer queue.
679  */
680 void
681 scsipi_periph_thaw_locked(struct scsipi_periph *periph, int count)
682 {
683 
684 	periph->periph_qfreeze -= count;
685 #ifdef DIAGNOSTIC
686 	if (periph->periph_qfreeze < 0) {
687 		static const char pc[] = "periph freeze count < 0";
688 		scsipi_printaddr(periph);
689 		printf("%s\n", pc);
690 		panic(pc);
691 	}
692 #endif
693 	if (periph->periph_qfreeze == 0 &&
694 	    (periph->periph_flags & PERIPH_WAITING) != 0)
695 		cv_broadcast(periph_cv_periph(periph));
696 }
697 
698 void
699 scsipi_periph_freeze(struct scsipi_periph *periph, int count)
700 {
701 
702 	mutex_enter(chan_mtx(periph->periph_channel));
703 	scsipi_periph_freeze_locked(periph, count);
704 	mutex_exit(chan_mtx(periph->periph_channel));
705 }
706 
707 void
708 scsipi_periph_thaw(struct scsipi_periph *periph, int count)
709 {
710 
711 	mutex_enter(chan_mtx(periph->periph_channel));
712 	scsipi_periph_thaw_locked(periph, count);
713 	mutex_exit(chan_mtx(periph->periph_channel));
714 }
715 
716 /*
717  * scsipi_periph_timed_thaw:
718  *
719  *	Thaw a device after some time has expired.
720  */
721 void
722 scsipi_periph_timed_thaw(void *arg)
723 {
724 	struct scsipi_periph *periph = arg;
725 	struct scsipi_channel *chan = periph->periph_channel;
726 
727 	callout_stop(&periph->periph_callout);
728 
729 	mutex_enter(chan_mtx(chan));
730 	scsipi_periph_thaw_locked(periph, 1);
731 	if ((periph->periph_channel->chan_flags & SCSIPI_CHAN_TACTIVE) == 0) {
732 		/*
733 		 * Kick the channel's queue here.  Note, we're running in
734 		 * interrupt context (softclock), so the adapter driver
735 		 * had better not sleep.
736 		 */
737 		mutex_exit(chan_mtx(chan));
738 		scsipi_run_queue(periph->periph_channel);
739 	} else {
740 		/*
741 		 * Tell the completion thread to kick the channel's queue here.
742 		 */
743 		periph->periph_channel->chan_tflags |= SCSIPI_CHANT_KICK;
744 		cv_broadcast(chan_cv_complete(chan));
745 		mutex_exit(chan_mtx(chan));
746 	}
747 }
748 
749 /*
750  * scsipi_wait_drain:
751  *
752  *	Wait for a periph's pending xfers to drain.
753  */
754 void
755 scsipi_wait_drain(struct scsipi_periph *periph)
756 {
757 	struct scsipi_channel *chan = periph->periph_channel;
758 
759 	mutex_enter(chan_mtx(chan));
760 	while (periph->periph_active != 0) {
761 		periph->periph_flags |= PERIPH_WAITDRAIN;
762 		cv_wait(periph_cv_active(periph), chan_mtx(chan));
763 	}
764 	mutex_exit(chan_mtx(chan));
765 }
766 
767 /*
768  * scsipi_kill_pending:
769  *
770  *	Kill off all pending xfers for a periph.
771  *
772  *	NOTE: Must be called with channel lock held
773  */
774 void
775 scsipi_kill_pending(struct scsipi_periph *periph)
776 {
777 	struct scsipi_channel *chan = periph->periph_channel;
778 
779 	(*chan->chan_bustype->bustype_kill_pending)(periph);
780 	while (periph->periph_active != 0) {
781 		periph->periph_flags |= PERIPH_WAITDRAIN;
782 		cv_wait(periph_cv_active(periph), chan_mtx(chan));
783 	}
784 }
785 
786 /*
787  * scsipi_print_cdb:
788  * prints a command descriptor block (for debugging purposes, error messages,
789  * SCSIVERBOSE, ...)
790  */
791 void
792 scsipi_print_cdb(struct scsipi_generic *cmd)
793 {
794 	int i, j;
795 
796  	printf("0x%02x", cmd->opcode);
797 
798  	switch (CDB_GROUPID(cmd->opcode)) {
799  	case CDB_GROUPID_0:
800  		j = CDB_GROUP0;
801  		break;
802  	case CDB_GROUPID_1:
803  		j = CDB_GROUP1;
804  		break;
805  	case CDB_GROUPID_2:
806  		j = CDB_GROUP2;
807  		break;
808  	case CDB_GROUPID_3:
809  		j = CDB_GROUP3;
810  		break;
811  	case CDB_GROUPID_4:
812  		j = CDB_GROUP4;
813  		break;
814  	case CDB_GROUPID_5:
815  		j = CDB_GROUP5;
816  		break;
817  	case CDB_GROUPID_6:
818  		j = CDB_GROUP6;
819  		break;
820  	case CDB_GROUPID_7:
821  		j = CDB_GROUP7;
822  		break;
823  	default:
824  		j = 0;
825  	}
826  	if (j == 0)
827  		j = sizeof (cmd->bytes);
828  	for (i = 0; i < j-1; i++) /* already done the opcode */
829  		printf(" %02x", cmd->bytes[i]);
830 }
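
/*
 * Example (illustrative): READ(10) has opcode 0x28, which is in CDB
 * group 1 (0x28 >> 5 == 1), so j = CDB_GROUP1 = 10 and the opcode plus
 * cmd->bytes[0..8], ten bytes in all, are printed.
 */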
831 
832 /*
833  * scsipi_interpret_sense:
834  *
835  *	Look at the returned sense and act on the error, determining
836  *	the unix error number to pass back.  (0 = report no error)
837  *
838  *	NOTE: If we return ERESTART, we are expected to have
839  *	thawed the device!
840  *
841  *	THIS IS THE DEFAULT ERROR HANDLER FOR SCSI DEVICES.
842  */
843 int
844 scsipi_interpret_sense(struct scsipi_xfer *xs)
845 {
846 	struct scsi_sense_data *sense;
847 	struct scsipi_periph *periph = xs->xs_periph;
848 	u_int8_t key;
849 	int error;
850 	u_int32_t info;
851 	static const char *error_mes[] = {
852 		"soft error (corrected)",
853 		"not ready", "medium error",
854 		"non-media hardware failure", "illegal request",
855 		"unit attention", "readonly device",
856 		"no data found", "vendor unique",
857 		"copy aborted", "command aborted",
858 		"search returned equal", "volume overflow",
859 		"verify miscompare", "unknown error key"
860 	};
861 
862 	sense = &xs->sense.scsi_sense;
863 #ifdef SCSIPI_DEBUG
864 	if (periph->periph_dbflags & SCSIPI_DB1) {
865 		int count, len;
866 		scsipi_printaddr(periph);
867 		printf(" sense debug information:\n");
868 		printf("\tcode 0x%x valid %d\n",
869 			SSD_RCODE(sense->response_code),
870 			sense->response_code & SSD_RCODE_VALID ? 1 : 0);
871 		printf("\tseg 0x%x key 0x%x ili 0x%x eom 0x%x fmark 0x%x\n",
872 			sense->segment,
873 			SSD_SENSE_KEY(sense->flags),
874 			sense->flags & SSD_ILI ? 1 : 0,
875 			sense->flags & SSD_EOM ? 1 : 0,
876 			sense->flags & SSD_FILEMARK ? 1 : 0);
877 		printf("\ninfo: 0x%x 0x%x 0x%x 0x%x followed by %d "
878 			"extra bytes\n",
879 			sense->info[0],
880 			sense->info[1],
881 			sense->info[2],
882 			sense->info[3],
883 			sense->extra_len);
884 		len = SSD_ADD_BYTES_LIM(sense);
885 		printf("\textra (up to %d bytes): ", len);
886 		for (count = 0; count < len; count++)
887 			printf("0x%x ", sense->csi[count]);
888 		printf("\n");
889 	}
890 #endif
891 
892 	/*
893 	 * If the periph has its own error handler, call it first.
894 	 * If it returns a legit error value, return that, otherwise
895 	 * it wants us to continue with normal error processing.
896 	 */
897 	if (periph->periph_switch->psw_error != NULL) {
898 		SC_DEBUG(periph, SCSIPI_DB2,
899 		    ("calling private err_handler()\n"));
900 		error = (*periph->periph_switch->psw_error)(xs);
901 		if (error != EJUSTRETURN)
902 			return error;
903 	}
904 	/* otherwise use the default */
905 	switch (SSD_RCODE(sense->response_code)) {
906 
907 		/*
908 		 * Old SCSI-1 and SASI devices respond with
909 		 * codes other than 70.
910 		 */
911 	case 0x00:		/* no error (command completed OK) */
912 		return 0;
913 	case 0x04:		/* drive not ready after it was selected */
914 		if ((periph->periph_flags & PERIPH_REMOVABLE) != 0)
915 			periph->periph_flags &= ~PERIPH_MEDIA_LOADED;
916 		if ((xs->xs_control & XS_CTL_IGNORE_NOT_READY) != 0)
917 			return 0;
918 		/* XXX - display some sort of error here? */
919 		return EIO;
920 	case 0x20:		/* invalid command */
921 		if ((xs->xs_control &
922 		     XS_CTL_IGNORE_ILLEGAL_REQUEST) != 0)
923 			return 0;
924 		return EINVAL;
925 	case 0x25:		/* invalid LUN (Adaptec ACB-4000) */
926 		return EACCES;
927 
928 		/*
929 		 * If the response code is 0x70 or 0x71, use the extended
930 		 * sense data and interpret the sense key.
931 		 */
932 	case 0x71:		/* delayed error */
933 		scsipi_printaddr(periph);
934 		key = SSD_SENSE_KEY(sense->flags);
935 		printf(" DEFERRED ERROR, key = 0x%x\n", key);
936 		/* FALLTHROUGH */
937 	case 0x70:
938 		if ((sense->response_code & SSD_RCODE_VALID) != 0)
939 			info = _4btol(sense->info);
940 		else
941 			info = 0;
942 		key = SSD_SENSE_KEY(sense->flags);
943 
944 		switch (key) {
945 		case SKEY_NO_SENSE:
946 		case SKEY_RECOVERED_ERROR:
947 			if (xs->resid == xs->datalen && xs->datalen) {
948 				/*
949 				 * Why is this here?
950 				 */
951 				xs->resid = 0;	/* not short read */
952 			}
953 			error = 0;
954 			break;
955 		case SKEY_EQUAL:
956 			error = 0;
957 			break;
958 		case SKEY_NOT_READY:
959 			if ((periph->periph_flags & PERIPH_REMOVABLE) != 0)
960 				periph->periph_flags &= ~PERIPH_MEDIA_LOADED;
961 			if ((xs->xs_control & XS_CTL_IGNORE_NOT_READY) != 0)
962 				return 0;
963 			if (sense->asc == 0x3A) {
964 				error = ENODEV; /* Medium not present */
965 				if (xs->xs_control & XS_CTL_SILENT_NODEV)
966 					return error;
967 			} else
968 				error = EIO;
969 			if ((xs->xs_control & XS_CTL_SILENT) != 0)
970 				return error;
971 			break;
972 		case SKEY_ILLEGAL_REQUEST:
973 			if ((xs->xs_control &
974 			     XS_CTL_IGNORE_ILLEGAL_REQUEST) != 0)
975 				return 0;
976 			/*
977 			 * Handle the case where a device reports
978 			 * Logical Unit Not Supported during discovery.
979 			 */
980 			if ((xs->xs_control & XS_CTL_DISCOVERY) != 0 &&
981 			    sense->asc == 0x25 &&
982 			    sense->ascq == 0x00)
983 				return EINVAL;
984 			if ((xs->xs_control & XS_CTL_SILENT) != 0)
985 				return EIO;
986 			error = EINVAL;
987 			break;
988 		case SKEY_UNIT_ATTENTION:
989 			if (sense->asc == 0x29 &&
990 			    sense->ascq == 0x00) {
991 				/* device or bus reset */
992 				return ERESTART;
993 			}
994 			if ((periph->periph_flags & PERIPH_REMOVABLE) != 0)
995 				periph->periph_flags &= ~PERIPH_MEDIA_LOADED;
996 			if ((xs->xs_control &
997 			     XS_CTL_IGNORE_MEDIA_CHANGE) != 0 ||
998 				/* XXX Should reupload any transient state. */
999 				(periph->periph_flags &
1000 				 PERIPH_REMOVABLE) == 0) {
1001 				return ERESTART;
1002 			}
1003 			if ((xs->xs_control & XS_CTL_SILENT) != 0)
1004 				return EIO;
1005 			error = EIO;
1006 			break;
1007 		case SKEY_DATA_PROTECT:
1008 			error = EROFS;
1009 			break;
1010 		case SKEY_BLANK_CHECK:
1011 			error = 0;
1012 			break;
1013 		case SKEY_ABORTED_COMMAND:
1014 			if (xs->xs_retries != 0) {
1015 				xs->xs_retries--;
1016 				error = ERESTART;
1017 			} else
1018 				error = EIO;
1019 			break;
1020 		case SKEY_VOLUME_OVERFLOW:
1021 			error = ENOSPC;
1022 			break;
1023 		default:
1024 			error = EIO;
1025 			break;
1026 		}
1027 
1028 		/* Print verbose decode if appropriate and possible */
1029 		if ((key == 0) ||
1030 		    ((xs->xs_control & XS_CTL_SILENT) != 0) ||
1031 		    (scsipi_print_sense(xs, 0) != 0))
1032 			return error;
1033 
1034 		/* Print brief(er) sense information */
1035 		scsipi_printaddr(periph);
1036 		printf("%s", error_mes[key - 1]);
1037 		if ((sense->response_code & SSD_RCODE_VALID) != 0) {
1038 			switch (key) {
1039 			case SKEY_NOT_READY:
1040 			case SKEY_ILLEGAL_REQUEST:
1041 			case SKEY_UNIT_ATTENTION:
1042 			case SKEY_DATA_PROTECT:
1043 				break;
1044 			case SKEY_BLANK_CHECK:
1045 				printf(", requested size: %d (decimal)",
1046 				    info);
1047 				break;
1048 			case SKEY_ABORTED_COMMAND:
1049 				if (xs->xs_retries)
1050 					printf(", retrying");
1051 				printf(", cmd 0x%x, info 0x%x",
1052 				    xs->cmd->opcode, info);
1053 				break;
1054 			default:
1055 				printf(", info = %d (decimal)", info);
1056 			}
1057 		}
1058 		if (sense->extra_len != 0) {
1059 			int n;
1060 			printf(", data =");
1061 			for (n = 0; n < sense->extra_len; n++)
1062 				printf(" %02x",
1063 				    sense->csi[n]);
1064 		}
1065 		printf("\n");
1066 		return error;
1067 
1068 	/*
1069 	 * Some other code, just report it
1070 	 */
1071 	default:
1072 #if defined(SCSIDEBUG) || defined(DEBUG)
1073 	{
1074 		static const char *uc = "undecodable sense error";
1075 		int i;
1076 		u_int8_t *cptr = (u_int8_t *) sense;
1077 		scsipi_printaddr(periph);
1078 		if (xs->cmd == &xs->cmdstore) {
1079 			printf("%s for opcode 0x%x, data=",
1080 			    uc, xs->cmdstore.opcode);
1081 		} else {
1082 			printf("%s, data=", uc);
1083 		}
1084 		for (i = 0; i < sizeof (*sense); i++)
1085 			printf(" 0x%02x", *(cptr++) & 0xff);
1086 		printf("\n");
1087 	}
1088 #else
1089 		scsipi_printaddr(periph);
1090 		printf("Sense Error Code 0x%x",
1091 			SSD_RCODE(sense->response_code));
1092 		if ((sense->response_code & SSD_RCODE_VALID) != 0) {
1093 			struct scsi_sense_data_unextended *usense =
1094 			    (struct scsi_sense_data_unextended *)sense;
1095 			printf(" at block no. %d (decimal)",
1096 			    _3btol(usense->block));
1097 		}
1098 		printf("\n");
1099 #endif
1100 		return EIO;
1101 	}
1102 }
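
/*
 * Worked example (illustrative): a removable device returning extended
 * sense with response code 0x70, key SKEY_NOT_READY and ASC 0x3A
 * ("medium not present") has PERIPH_MEDIA_LOADED cleared and gets
 * ENODEV back; the same key with any other ASC yields EIO.
 */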
1103 
1104 /*
1105  * scsipi_test_unit_ready:
1106  *
1107  *	Issue a `test unit ready' request.
1108  */
1109 int
1110 scsipi_test_unit_ready(struct scsipi_periph *periph, int flags)
1111 {
1112 	struct scsi_test_unit_ready cmd;
1113 	int retries;
1114 
1115 	/* some ATAPI drives don't support TEST UNIT READY. Sigh */
1116 	if (periph->periph_quirks & PQUIRK_NOTUR)
1117 		return 0;
1118 
1119 	if (flags & XS_CTL_DISCOVERY)
1120 		retries = 0;
1121 	else
1122 		retries = SCSIPIRETRIES;
1123 
1124 	memset(&cmd, 0, sizeof(cmd));
1125 	cmd.opcode = SCSI_TEST_UNIT_READY;
1126 
1127 	return scsipi_command(periph, (void *)&cmd, sizeof(cmd), 0, 0,
1128 	    retries, 10000, NULL, flags);
1129 }
1130 
1131 static const struct scsipi_inquiry3_pattern {
1132 	const char vendor[8];
1133 	const char product[16];
1134 	const char revision[4];
1135 } scsipi_inquiry3_quirk[] = {
1136 	{ "ES-6600 ", "", "" },
1137 };
1138 
1139 static int
1140 scsipi_inquiry3_ok(const struct scsipi_inquiry_data *ib)
1141 {
1142 	for (size_t i = 0; i < __arraycount(scsipi_inquiry3_quirk); i++) {
1143 		const struct scsipi_inquiry3_pattern *q =
1144 		    &scsipi_inquiry3_quirk[i];
1145 #define MATCH(field) \
1146     (q->field[0] ? memcmp(ib->field, q->field, sizeof(ib->field)) == 0 : 1)
1147 		if (MATCH(vendor) && MATCH(product) && MATCH(revision))
1148 			return 0;
1149 	}
1150 	return 1;
1151 }
1152 
1153 /*
1154  * scsipi_inquire:
1155  *
1156  *	Ask the device about itself.
1157  */
1158 int
1159 scsipi_inquire(struct scsipi_periph *periph, struct scsipi_inquiry_data *inqbuf,
1160     int flags)
1161 {
1162 	struct scsipi_inquiry cmd;
1163 	int error;
1164 	int retries;
1165 
1166 	if (flags & XS_CTL_DISCOVERY)
1167 		retries = 0;
1168 	else
1169 		retries = SCSIPIRETRIES;
1170 
1171 	/*
1172 	 * If we request more data than the device can provide, it SHOULD just
1173 	 * return a short response.  However, some devices error with an
1174 	 * ILLEGAL REQUEST sense code, and yet others have even more special
1175 	 * failure modes (such as the GL641USB flash adapter, which goes loony
1176 	 * and sends corrupted CRCs).  To work around this, and to bring our
1177 	 * behavior more in line with other OSes, we do a shorter inquiry,
1178 	 * covering all the SCSI-2 information, first, and then request more
1179 	 * data iff the "additional length" field indicates there is more.
1180 	 * - mycroft, 2003/10/16
1181 	 */
1182 	memset(&cmd, 0, sizeof(cmd));
1183 	cmd.opcode = INQUIRY;
1184 	cmd.length = SCSIPI_INQUIRY_LENGTH_SCSI2;
1185 	error = scsipi_command(periph, (void *)&cmd, sizeof(cmd),
1186 	    (void *)inqbuf, SCSIPI_INQUIRY_LENGTH_SCSI2, retries,
1187 	    10000, NULL, flags | XS_CTL_DATA_IN);
1188 	if (!error &&
1189 	    inqbuf->additional_length > SCSIPI_INQUIRY_LENGTH_SCSI2 - 4) {
1190 	    if (scsipi_inquiry3_ok(inqbuf)) {
1191 #if 0
1192 printf("inquire: addlen=%d, retrying\n", inqbuf->additional_length);
1193 #endif
1194 		cmd.length = SCSIPI_INQUIRY_LENGTH_SCSI3;
1195 		error = scsipi_command(periph, (void *)&cmd, sizeof(cmd),
1196 		    (void *)inqbuf, SCSIPI_INQUIRY_LENGTH_SCSI3, retries,
1197 		    10000, NULL, flags | XS_CTL_DATA_IN);
1198 #if 0
1199 printf("inquire: error=%d\n", error);
1200 #endif
1201 	    }
1202 	}
1203 
1204 #ifdef SCSI_OLD_NOINQUIRY
1205 	/*
1206 	 * Kludge for the Adaptec ACB-4000 SCSI->MFM translator.
1207 	 * This board doesn't support the INQUIRY command at all.
1208 	 */
1209 	if (error == EINVAL || error == EACCES) {
1210 		/*
1211 		 * Conjure up an INQUIRY response.
1212 		 */
1213 		inqbuf->device = (error == EINVAL ?
1214 			 SID_QUAL_LU_PRESENT :
1215 			 SID_QUAL_LU_NOTPRESENT) | T_DIRECT;
1216 		inqbuf->dev_qual2 = 0;
1217 		inqbuf->version = 0;
1218 		inqbuf->response_format = SID_FORMAT_SCSI1;
1219 		inqbuf->additional_length = SCSIPI_INQUIRY_LENGTH_SCSI2 - 4;
1220 		inqbuf->flags1 = inqbuf->flags2 = inqbuf->flags3 = 0;
1221 		memcpy(inqbuf->vendor, "ADAPTEC ACB-4000            ", 28);
1222 		error = 0;
1223 	}
1224 
1225 	/*
1226 	 * Kludge for the Emulex MT-02 SCSI->QIC translator.
1227 	 * This board gives an empty response to an INQUIRY command.
1228 	 */
1229 	else if (error == 0 &&
1230 	    inqbuf->device == (SID_QUAL_LU_PRESENT | T_DIRECT) &&
1231 	    inqbuf->dev_qual2 == 0 &&
1232 	    inqbuf->version == 0 &&
1233 	    inqbuf->response_format == SID_FORMAT_SCSI1) {
1234 		/*
1235 		 * Fill out the INQUIRY response.
1236 		 */
1237 		inqbuf->device = (SID_QUAL_LU_PRESENT | T_SEQUENTIAL);
1238 		inqbuf->dev_qual2 = SID_REMOVABLE;
1239 		inqbuf->additional_length = SCSIPI_INQUIRY_LENGTH_SCSI2 - 4;
1240 		inqbuf->flags1 = inqbuf->flags2 = inqbuf->flags3 = 0;
1241 		memcpy(inqbuf->vendor, "EMULEX  MT-02 QIC           ", 28);
1242 	}
1243 #endif /* SCSI_OLD_NOINQUIRY */
1244 
1245 	return error;
1246 }
1247 
1248 /*
1249  * scsipi_prevent:
1250  *
1251  *	Prevent or allow the user to remove the media
1252  */
1253 int
1254 scsipi_prevent(struct scsipi_periph *periph, int type, int flags)
1255 {
1256 	struct scsi_prevent_allow_medium_removal cmd;
1257 
1258 	if (periph->periph_quirks & PQUIRK_NODOORLOCK)
1259 		return 0;
1260 
1261 	memset(&cmd, 0, sizeof(cmd));
1262 	cmd.opcode = SCSI_PREVENT_ALLOW_MEDIUM_REMOVAL;
1263 	cmd.how = type;
1264 
1265 	return (scsipi_command(periph, (void *)&cmd, sizeof(cmd), 0, 0,
1266 	    SCSIPIRETRIES, 5000, NULL, flags));
1267 }
1268 
1269 /*
1270  * scsipi_start:
1271  *
1272  *	Send a START UNIT.
1273  */
1274 int
1275 scsipi_start(struct scsipi_periph *periph, int type, int flags)
1276 {
1277 	struct scsipi_start_stop cmd;
1278 
1279 	memset(&cmd, 0, sizeof(cmd));
1280 	cmd.opcode = START_STOP;
1281 	cmd.byte2 = 0x00;
1282 	cmd.how = type;
1283 
1284 	return scsipi_command(periph, (void *)&cmd, sizeof(cmd), 0, 0,
1285 	    SCSIPIRETRIES, (type & SSS_START) ? 60000 : 10000, NULL, flags);
1286 }
1287 
1288 /*
1289  * scsipi_mode_sense, scsipi_mode_sense_big:
1290  *	get a mode page from a device
1291  */
1292 
1293 int
1294 scsipi_mode_sense(struct scsipi_periph *periph, int byte2, int page,
1295     struct scsi_mode_parameter_header_6 *data, int len, int flags, int retries,
1296     int timeout)
1297 {
1298 	struct scsi_mode_sense_6 cmd;
1299 
1300 	memset(&cmd, 0, sizeof(cmd));
1301 	cmd.opcode = SCSI_MODE_SENSE_6;
1302 	cmd.byte2 = byte2;
1303 	cmd.page = page;
1304 	cmd.length = len & 0xff;
1305 
1306 	return scsipi_command(periph, (void *)&cmd, sizeof(cmd),
1307 	    (void *)data, len, retries, timeout, NULL, flags | XS_CTL_DATA_IN);
1308 }
1309 
1310 int
1311 scsipi_mode_sense_big(struct scsipi_periph *periph, int byte2, int page,
1312     struct scsi_mode_parameter_header_10 *data, int len, int flags, int retries,
1313     int timeout)
1314 {
1315 	struct scsi_mode_sense_10 cmd;
1316 
1317 	memset(&cmd, 0, sizeof(cmd));
1318 	cmd.opcode = SCSI_MODE_SENSE_10;
1319 	cmd.byte2 = byte2;
1320 	cmd.page = page;
1321 	_lto2b(len, cmd.length);
1322 
1323 	return scsipi_command(periph, (void *)&cmd, sizeof(cmd),
1324 	    (void *)data, len, retries, timeout, NULL, flags | XS_CTL_DATA_IN);
1325 }
1326 
1327 int
1328 scsipi_mode_select(struct scsipi_periph *periph, int byte2,
1329     struct scsi_mode_parameter_header_6 *data, int len, int flags, int retries,
1330     int timeout)
1331 {
1332 	struct scsi_mode_select_6 cmd;
1333 
1334 	memset(&cmd, 0, sizeof(cmd));
1335 	cmd.opcode = SCSI_MODE_SELECT_6;
1336 	cmd.byte2 = byte2;
1337 	cmd.length = len & 0xff;
1338 
1339 	return scsipi_command(periph, (void *)&cmd, sizeof(cmd),
1340 	    (void *)data, len, retries, timeout, NULL, flags | XS_CTL_DATA_OUT);
1341 }
1342 
1343 int
1344 scsipi_mode_select_big(struct scsipi_periph *periph, int byte2,
1345     struct scsi_mode_parameter_header_10 *data, int len, int flags, int retries,
1346     int timeout)
1347 {
1348 	struct scsi_mode_select_10 cmd;
1349 
1350 	memset(&cmd, 0, sizeof(cmd));
1351 	cmd.opcode = SCSI_MODE_SELECT_10;
1352 	cmd.byte2 = byte2;
1353 	_lto2b(len, cmd.length);
1354 
1355 	return scsipi_command(periph, (void *)&cmd, sizeof(cmd),
1356 	    (void *)data, len, retries, timeout, NULL, flags | XS_CTL_DATA_OUT);
1357 }
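
/*
 * Usage sketch (illustrative; the page code, buffer size and timeout
 * are examples, not values mandated by this API):
 *
 *	struct {
 *		struct scsi_mode_parameter_header_6 hdr;
 *		u_int8_t page[0x20];
 *	} buf;
 *
 *	error = scsipi_mode_sense(periph, 0, 0x08, &buf.hdr,
 *	    sizeof(buf), 0, SCSIPIRETRIES, 10000);
 */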
1358 
1359 /*
1360  * scsipi_get_opcodeinfo:
1361  *
1362  *	Query the device for supported commands and their timeouts,
1363  *	building a timeout lookup table if timeout information is available.
1364  */
1365 void
1366 scsipi_get_opcodeinfo(struct scsipi_periph *periph)
1367 {
1368 	u_int8_t *data;
1369 	int len = 16*1024;
1370 	int rc;
1371 	struct scsi_repsuppopcode cmd;
1372 
1373 	/* refrain from asking for supported opcodes */
1374 	if (periph->periph_quirks & PQUIRK_NOREPSUPPOPC ||
1375 	    periph->periph_type == T_PROCESSOR || /* spec. */
1376 	    periph->periph_type == T_CDROM) /* spec. */
1377 		return;
1378 
1379 	scsipi_free_opcodeinfo(periph);
1380 
1381 	/*
1382 	 * query REPORT SUPPORTED OPERATION CODES
1383 	 * if OK
1384 	 *   enumerate all codes
1385 	 *     if timeout exists insert maximum into opcode table
1386 	 */
1387 
1388 	data = malloc(len, M_DEVBUF, M_WAITOK|M_ZERO);
1389 
1390 	memset(&cmd, 0, sizeof(cmd));
1391 	cmd.opcode = SCSI_MAINTENANCE_IN;
1392 	cmd.svcaction = RSOC_REPORT_SUPPORTED_OPCODES;
1393 	cmd.repoption = RSOC_RCTD|RSOC_ALL;
1394 	_lto4b(len, cmd.alloclen);
1395 
1396 	rc = scsipi_command(periph, (void *)&cmd, sizeof(cmd),
1397 			    (void *)data, len, 0, 1000, NULL,
1398 			    XS_CTL_DATA_IN|XS_CTL_SILENT);
1399 
1400 	if (rc == 0) {
1401 		int count;
1402 		int dlen = _4btol(data);
1403 		u_int8_t *c = data + 4;
1404 
1405 		SC_DEBUG(periph, SCSIPI_DB3,
1406 			 ("supported opcode timeout-values loaded\n"));
1407 		SC_DEBUG(periph, SCSIPI_DB3,
1408 			 ("CMD  LEN  SA    spec  nom. time  cmd timeout\n"));
1409 
1410 		struct scsipi_opcodes *tot = malloc(sizeof(struct scsipi_opcodes),
1411 		    M_DEVBUF, M_WAITOK|M_ZERO);
1412 
1413 		count = 0;
1414 		while (tot != NULL &&
1415 		       dlen >= (int)sizeof(struct scsi_repsupopcode_all_commands_descriptor)) {
1416 			struct scsi_repsupopcode_all_commands_descriptor *acd
1417 				= (struct scsi_repsupopcode_all_commands_descriptor *)c;
1418 #ifdef SCSIPI_DEBUG
1419 			int cdblen = _2btol((const u_int8_t *)&acd->cdblen);
1420 #endif
1421 			dlen -= sizeof(struct scsi_repsupopcode_all_commands_descriptor);
1422 			c += sizeof(struct scsi_repsupopcode_all_commands_descriptor);
1423 			SC_DEBUG(periph, SCSIPI_DB3,
1424 				 ("0x%02x(%2d) ", acd->opcode, cdblen));
1425 
1426 			tot->opcode_info[acd->opcode].ti_flags = SCSIPI_TI_VALID;
1427 
1428 			if (acd->flags & RSOC_ACD_SERVACTV) {
1429 				SC_DEBUGN(periph, SCSIPI_DB3,
1430 					 ("0x%02x%02x ",
1431 					  acd->serviceaction[0],
1432 					  acd->serviceaction[1]));
1433 			} else {
1434 				SC_DEBUGN(periph, SCSIPI_DB3, ("       "));
1435 			}
1436 
1437 			if (acd->flags & RSOC_ACD_CTDP
1438 			    && dlen >= (int)sizeof(struct scsi_repsupopcode_timeouts_descriptor)) {
1439 				struct scsi_repsupopcode_timeouts_descriptor *td
1440 					= (struct scsi_repsupopcode_timeouts_descriptor *)c;
1441 				long nomto = _4btol(td->nom_process_timeout);
1442 				long cmdto = _4btol(td->cmd_process_timeout);
1443 				long t = (cmdto > nomto) ? cmdto : nomto;
1444 
1445 				dlen -= sizeof(struct scsi_repsupopcode_timeouts_descriptor);
1446 				c += sizeof(struct scsi_repsupopcode_timeouts_descriptor);
1447 
1448 				SC_DEBUGN(periph, SCSIPI_DB3,
1449 					  ("0x%02x %10ld %10ld",
1450 					   td->cmd_specific,
1451 					   nomto, cmdto));
1452 
1453 				if (t > tot->opcode_info[acd->opcode].ti_timeout) {
1454 					tot->opcode_info[acd->opcode].ti_timeout = t;
1455 					++count;
1456 				}
1457 			}
1458 			SC_DEBUGN(periph, SCSIPI_DB3, ("\n"));
1459 		}
1460 
1461 		if (count > 0) {
1462 			periph->periph_opcs = tot;
1463 		} else {
1464 			free(tot, M_DEVBUF);
1465 			SC_DEBUG(periph, SCSIPI_DB3,
1466 			 	("no usable timeout values available\n"));
1467 		}
1468 	} else {
1469 		SC_DEBUG(periph, SCSIPI_DB3,
1470 			 ("SCSI_MAINTENANCE_IN"
1471 			  "[RSOC_REPORT_SUPPORTED_OPCODES] failed error=%d"
1472 			  " - no device provided timeout "
1473 			  "values available\n", rc));
1474 	}
1475 
1476 	free(data, M_DEVBUF);
1477 }
1478 
1479 /*
1480  * scsipi_update_timeouts:
1481  *	Override the timeout value if device/config-provided
1482  *	timeouts are available.
1483  */
1484 static void
1485 scsipi_update_timeouts(struct scsipi_xfer *xs)
1486 {
1487 	struct scsipi_opcodes *opcs;
1488 	u_int8_t cmd;
1489 	int timeout;
1490 	struct scsipi_opinfo *oi;
1491 
1492 	if (xs->timeout <= 0) {
1493 		return;
1494 	}
1495 
1496 	opcs = xs->xs_periph->periph_opcs;
1497 
1498 	if (opcs == NULL) {
1499 		return;
1500 	}
1501 
1502 	cmd = xs->cmd->opcode;
1503 	oi = &opcs->opcode_info[cmd];
1504 
1505 	timeout = 1000 * (int)oi->ti_timeout;
1506 
1507 
1508 	if (timeout > xs->timeout && timeout < 86400000) {
1509 		/*
1510 		 * pick up device configured timeouts if they
1511 		 * are longer than the requested ones but less
1512 		 * than a day
1513 		 */
1514 #ifdef SCSIPI_DEBUG
1515 		if ((oi->ti_flags & SCSIPI_TI_LOGGED) == 0) {
1516 			SC_DEBUG(xs->xs_periph, SCSIPI_DB3,
1517 				 ("Overriding command 0x%02x "
1518 				  "timeout of %d with %d ms\n",
1519 				  cmd, xs->timeout, timeout));
1520 			oi->ti_flags |= SCSIPI_TI_LOGGED;
1521 		}
1522 #endif
1523 		xs->timeout = timeout;
1524 	}
1525 }
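
/*
 * Worked example (illustrative figures): if the device reported a
 * 30 second timeout for an opcode (ti_timeout == 30), a request
 * issued with xs->timeout == 10000 ms is raised to 30000 ms; a
 * reported timeout of a day or more (>= 86400000 ms) is ignored.
 */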
1526 
1527 /*
1528  * scsipi_free_opcodeinfo:
1529  *
1530  * free the opcode information table
1531  */
1532 void
1533 scsipi_free_opcodeinfo(struct scsipi_periph *periph)
1534 {
1535 	if (periph->periph_opcs != NULL) {
1536 		free(periph->periph_opcs, M_DEVBUF);
1537 	}
1538 
1539 	periph->periph_opcs = NULL;
1540 }
1541 
1542 /*
1543  * scsipi_done:
1544  *
1545  *	This routine is called by an adapter's interrupt handler when
1546  *	an xfer is completed.
1547  */
1548 void
1549 scsipi_done(struct scsipi_xfer *xs)
1550 {
1551 	struct scsipi_periph *periph = xs->xs_periph;
1552 	struct scsipi_channel *chan = periph->periph_channel;
1553 	int freezecnt;
1554 
1555 	SC_DEBUG(periph, SCSIPI_DB2, ("scsipi_done\n"));
1556 #ifdef SCSIPI_DEBUG
1557 	if (periph->periph_dbflags & SCSIPI_DB1)
1558 		show_scsipi_cmd(xs);
1559 #endif
1560 
1561 	mutex_enter(chan_mtx(chan));
1562 	/*
1563 	 * The resource this command was using is now free.
1564 	 */
1565 	if (xs->xs_status & XS_STS_DONE) {
1566 		/* XXX in certain circumstances, such as a device
1567 		 * being detached, an xs that has already been
1568 		 * scsipi_done()'d by the main thread will be done'd
1569 		 * again by scsibusdetach(). Putting the xs on the
1570 		 * chan_complete queue causes list corruption and
1571 		 * everyone dies. This prevents that, but perhaps
1572 		 * there should be better coordination somewhere such
1573 		 * that this won't ever happen (and can be turned into
1574 		 * a KASSERT()).
1575 		 */
1576 		mutex_exit(chan_mtx(chan));
1577 		goto out;
1578 	}
1579 	scsipi_put_resource(chan);
1580 	xs->xs_periph->periph_sent--;
1581 
1582 	/*
1583 	 * If the command was tagged, free the tag.
1584 	 */
1585 	if (XS_CTL_TAGTYPE(xs) != 0)
1586 		scsipi_put_tag(xs);
1587 	else
1588 		periph->periph_flags &= ~PERIPH_UNTAG;
1589 
1590 	/* Mark the command as `done'. */
1591 	xs->xs_status |= XS_STS_DONE;
1592 
1593 #ifdef DIAGNOSTIC
1594 	if ((xs->xs_control & (XS_CTL_ASYNC|XS_CTL_POLL)) ==
1595 	    (XS_CTL_ASYNC|XS_CTL_POLL))
1596 		panic("scsipi_done: ASYNC and POLL");
1597 #endif
1598 
1599 	/*
1600 	 * If the xfer had an error of any sort, freeze the
1601 	 * periph's queue.  Freeze it again if we were requested
1602 	 * to do so in the xfer.
1603 	 */
1604 	freezecnt = 0;
1605 	if (xs->error != XS_NOERROR)
1606 		freezecnt++;
1607 	if (xs->xs_control & XS_CTL_FREEZE_PERIPH)
1608 		freezecnt++;
1609 	if (freezecnt != 0)
1610 		scsipi_periph_freeze_locked(periph, freezecnt);
1611 
1612 	/*
1613 	 * Record the xfer with a pending sense, in case a SCSI reset is
1614 	 * received before the thread is woken up.
1615 	 */
1616 	if (xs->error == XS_BUSY && xs->status == SCSI_CHECK) {
1617 		periph->periph_flags |= PERIPH_SENSE;
1618 		periph->periph_xscheck = xs;
1619 	}
1620 
1621 	/*
1622 	 * If this was an xfer that was not to complete asynchronously,
1623 	 * let the requesting thread perform error checking/handling
1624 	 * in its context.
1625 	 */
1626 	if ((xs->xs_control & XS_CTL_ASYNC) == 0) {
1627 		/*
1628 		 * If it's a polling job, just return, to unwind the
1629 		 * call graph.  We don't need to restart the queue,
1630 		 * because polling jobs are treated specially, and
1631 		 * are really only used during crash dumps anyway
1632 		 * (XXX or during boot-time autoconfiguration of
1633 		 * ATAPI devices).
1634 		 */
1635 		if (xs->xs_control & XS_CTL_POLL) {
1636 			mutex_exit(chan_mtx(chan));
1637 			return;
1638 		}
1639 		cv_broadcast(xs_cv(xs));
1640 		mutex_exit(chan_mtx(chan));
1641 		goto out;
1642 	}
1643 
1644 	/*
1645 	 * Catch the extremely common case of I/O completing
1646 	 * without error; no use in taking a context switch
1647 	 * if we can handle it in interrupt context.
1648 	 */
1649 	if (xs->error == XS_NOERROR) {
1650 		mutex_exit(chan_mtx(chan));
1651 		(void) scsipi_complete(xs);
1652 		goto out;
1653 	}
1654 
1655 	/*
1656 	 * There is an error on this xfer.  Put it on the channel's
1657 	 * completion queue, and wake up the completion thread.
1658 	 */
1659 	TAILQ_INSERT_TAIL(&chan->chan_complete, xs, channel_q);
1660 	cv_broadcast(chan_cv_complete(chan));
1661 	mutex_exit(chan_mtx(chan));
1662 
1663  out:
1664 	/*
1665 	 * If there are more xfers on the channel's queue, attempt to
1666 	 * run them.
1667 	 */
1668 	scsipi_run_queue(chan);
1669 }
1670 
1671 /*
1672  * scsipi_complete:
1673  *
1674  *	Completion of a scsipi_xfer.  This is the guts of scsipi_done().
1675  *
1676  *	NOTE: This routine MUST be called with valid thread context
1677  *	except for the case where the following two conditions are
1678  *	true:
1679  *
1680  *		xs->error == XS_NOERROR
1681  *		XS_CTL_ASYNC is set in xs->xs_control
1682  *
1683  *	The semantics of this routine can be tricky, so here is an
1684  *	explanation:
1685  *
1686  *		0		Xfer completed successfully.
1687  *
1688  *		ERESTART	Xfer had an error, but was restarted.
1689  *
1690  *		anything else	Xfer had an error, return value is Unix
1691  *				errno.
1692  *
1693  *	If the return value is anything but ERESTART:
1694  *
1695  *		- If XS_CTL_ASYNC is set, `xs' has been freed back to
1696  *		  the pool.
1697  *		- If there is a buf associated with the xfer,
1698  *		  it has been biodone()'d.
1699  */
1700 static int
1701 scsipi_complete(struct scsipi_xfer *xs)
1702 {
1703 	struct scsipi_periph *periph = xs->xs_periph;
1704 	struct scsipi_channel *chan = periph->periph_channel;
1705 	int error;
1706 
1707 #ifdef DIAGNOSTIC
1708 	if ((xs->xs_control & XS_CTL_ASYNC) != 0 && xs->bp == NULL)
1709 		panic("scsipi_complete: XS_CTL_ASYNC but no buf");
1710 #endif
1711 	/*
1712 	 * If command terminated with a CHECK CONDITION, we need to issue a
1713 	 * REQUEST_SENSE command. Once the REQUEST_SENSE has been processed
1714 	 * we'll have the real status.
1715 	 * Must be processed with channel lock held to avoid missing
1716 	 * a SCSI bus reset for this command.
1717 	 */
1718 	mutex_enter(chan_mtx(chan));
1719 	if (xs->error == XS_BUSY && xs->status == SCSI_CHECK) {
1720 		/* request sense for a request sense? */
1721 		if (xs->xs_control & XS_CTL_REQSENSE) {
1722 			scsipi_printaddr(periph);
1723 			printf("request sense for a request sense?\n");
1724 			/* XXX maybe we should reset the device ? */
1725 			/* we've been frozen because xs->error != XS_NOERROR */
1726 			scsipi_periph_thaw_locked(periph, 1);
1727 			mutex_exit(chan_mtx(chan));
1728 			if (xs->resid < xs->datalen) {
1729 				printf("we read %d bytes of sense anyway:\n",
1730 				    xs->datalen - xs->resid);
1731 				scsipi_print_sense_data((void *)xs->data, 0);
1732 			}
1733 			return EINVAL;
1734 		}
1735 		mutex_exit(chan_mtx(chan)); /* XXX allows other commands to queue or run */
1736 		scsipi_request_sense(xs);
1737 	} else
1738 		mutex_exit(chan_mtx(chan));
1739 
1740 	/*
1741 	 * If it's a user level request, bypass all usual completion
1742 	 * processing; let the user work it out.
1743 	 */
1744 	if ((xs->xs_control & XS_CTL_USERCMD) != 0) {
1745 		SC_DEBUG(periph, SCSIPI_DB3, ("calling user done()\n"));
1746 		mutex_enter(chan_mtx(chan));
1747 		if (xs->error != XS_NOERROR)
1748 			scsipi_periph_thaw_locked(periph, 1);
1749 		mutex_exit(chan_mtx(chan));
1750 		scsipi_user_done(xs);
1751 		SC_DEBUG(periph, SCSIPI_DB3, ("returned from user done()\n"));
1752 		return 0;
1753 	}
1754 
1755 	switch (xs->error) {
1756 	case XS_NOERROR:
1757 		error = 0;
1758 		break;
1759 
1760 	case XS_SENSE:
1761 	case XS_SHORTSENSE:
1762 		error = (*chan->chan_bustype->bustype_interpret_sense)(xs);
1763 		break;
1764 
1765 	case XS_RESOURCE_SHORTAGE:
1766 		/*
1767 		 * XXX Should freeze channel's queue.
1768 		 */
1769 		scsipi_printaddr(periph);
1770 		printf("adapter resource shortage\n");
1771 		/* FALLTHROUGH */
1772 
1773 	case XS_BUSY:
1774 		if (xs->error == XS_BUSY && xs->status == SCSI_QUEUE_FULL) {
1775 			struct scsipi_max_openings mo;
1776 
1777 			/*
1778 			 * We set the openings to active - 1, assuming that
1779 			 * the command that got us here is the first one that
1780 			 * can't fit into the device's queue.  If that's not
1781 			 * the case, I guess we'll find out soon enough.
1782 			 */
1783 			mo.mo_target = periph->periph_target;
1784 			mo.mo_lun = periph->periph_lun;
1785 			if (periph->periph_active < periph->periph_openings)
1786 				mo.mo_openings = periph->periph_active - 1;
1787 			else
1788 				mo.mo_openings = periph->periph_openings - 1;
1789 #ifdef DIAGNOSTIC
1790 			if (mo.mo_openings < 0) {
1791 				scsipi_printaddr(periph);
1792 				printf("QUEUE FULL resulted in < 0 openings\n");
1793 				panic("scsipi_done");
1794 			}
1795 #endif
1796 			if (mo.mo_openings == 0) {
1797 				scsipi_printaddr(periph);
1798 				printf("QUEUE FULL resulted in 0 openings\n");
1799 				mo.mo_openings = 1;
1800 			}
1801 			scsipi_async_event(chan, ASYNC_EVENT_MAX_OPENINGS, &mo);
1802 			error = ERESTART;
1803 		} else if (xs->xs_retries != 0) {
1804 			xs->xs_retries--;
1805 			/*
1806 			 * Wait one second, and try again.
1807 			 */
1808 			mutex_enter(chan_mtx(chan));
1809 			if ((xs->xs_control & XS_CTL_POLL) ||
1810 			    (chan->chan_flags & SCSIPI_CHAN_TACTIVE) == 0) {
1811 				/* XXX: quite extreme */
1812 				kpause("xsbusy", false, hz, chan_mtx(chan));
1813 			} else if (!callout_pending(&periph->periph_callout)) {
1814 				scsipi_periph_freeze_locked(periph, 1);
1815 				callout_reset(&periph->periph_callout,
1816 				    hz, scsipi_periph_timed_thaw, periph);
1817 			}
1818 			mutex_exit(chan_mtx(chan));
1819 			error = ERESTART;
1820 		} else
1821 			error = EBUSY;
1822 		break;
1823 
1824 	case XS_REQUEUE:
1825 		error = ERESTART;
1826 		break;
1827 
1828 	case XS_SELTIMEOUT:
1829 	case XS_TIMEOUT:
1830 		/*
1831 		 * If the device hasn't gone away, honor retry counts.
1832 		 *
1833 		 * Note that if we're in the middle of probing it,
1834 		 * it won't be found because it isn't here yet so
1835 		 * we won't honor the retry count in that case.
1836 		 */
1837 		if (scsipi_lookup_periph(chan, periph->periph_target,
1838 		    periph->periph_lun) && xs->xs_retries != 0) {
1839 			xs->xs_retries--;
1840 			error = ERESTART;
1841 		} else
1842 			error = EIO;
1843 		break;
1844 
1845 	case XS_RESET:
1846 		if (xs->xs_control & XS_CTL_REQSENSE) {
1847 			/*
1848 			 * request sense interrupted by reset: signal it
1849 			 * with EINTR return code.
1850 			 */
1851 			error = EINTR;
1852 		} else {
1853 			if (xs->xs_retries != 0) {
1854 				xs->xs_retries--;
1855 				error = ERESTART;
1856 			} else
1857 				error = EIO;
1858 		}
1859 		break;
1860 
1861 	case XS_DRIVER_STUFFUP:
1862 		scsipi_printaddr(periph);
1863 		printf("generic HBA error\n");
1864 		error = EIO;
1865 		break;
1866 	default:
1867 		scsipi_printaddr(periph);
1868 		printf("invalid return code from adapter: %d\n", xs->error);
1869 		error = EIO;
1870 		break;
1871 	}
1872 
1873 	mutex_enter(chan_mtx(chan));
1874 	if (error == ERESTART) {
1875 		/*
1876 		 * If we get here, the periph has been thawed and frozen
1877 		 * again if we had to issue recovery commands.  Alternatively,
1878 		 * it may have been frozen again and in a timed thaw.  In
1879 		 * any case, we thaw the periph once we re-enqueue the
1880 		 * command.  Once the periph is fully thawed, it will begin
1881 		 * operation again.
1882 		 */
1883 		xs->error = XS_NOERROR;
1884 		xs->status = SCSI_OK;
1885 		xs->xs_status &= ~XS_STS_DONE;
1886 		xs->xs_requeuecnt++;
1887 		error = scsipi_enqueue(xs);
1888 		if (error == 0) {
1889 			scsipi_periph_thaw_locked(periph, 1);
1890 			mutex_exit(chan_mtx(chan));
1891 			return ERESTART;
1892 		}
1893 	}
1894 
1895 	/*
1896 	 * scsipi_done() freezes the queue if not XS_NOERROR.
1897 	 * Thaw it here.
1898 	 */
1899 	if (xs->error != XS_NOERROR)
1900 		scsipi_periph_thaw_locked(periph, 1);
1901 	mutex_exit(chan_mtx(chan));
1902 
1903 	if (periph->periph_switch->psw_done)
1904 		periph->periph_switch->psw_done(xs, error);
1905 
1906 	mutex_enter(chan_mtx(chan));
1907 	if (xs->xs_control & XS_CTL_ASYNC)
1908 		scsipi_put_xs(xs);
1909 	mutex_exit(chan_mtx(chan));
1910 
1911 	return error;
1912 }
1913 
1914 /*
1915  * Issue a request sense for the given scsipi_xfer. Called when the xfer
1916  * returns with a CHECK_CONDITION status. Must be called in valid thread
1917  * context.
1918  */
1920 static void
1921 scsipi_request_sense(struct scsipi_xfer *xs)
1922 {
1923 	struct scsipi_periph *periph = xs->xs_periph;
1924 	int flags, error;
1925 	struct scsi_request_sense cmd;
1926 
1927 	periph->periph_flags |= PERIPH_SENSE;
1928 
1929 	/* if command was polling, request sense will too */
1930 	flags = xs->xs_control & XS_CTL_POLL;
1931 	/* Polling commands can't sleep */
1932 	if (flags)
1933 		flags |= XS_CTL_NOSLEEP;
1934 
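	/*
	 * XS_CTL_URGENT puts the sense command at the head of the channel
	 * queue; XS_CTL_THAW_PERIPH lets it run on the otherwise frozen
	 * periph once enqueued, and XS_CTL_FREEZE_PERIPH refreezes the
	 * periph when the command completes, keeping the freeze count
	 * balanced around the recovery command.
	 */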
1935 	flags |= XS_CTL_REQSENSE | XS_CTL_URGENT | XS_CTL_DATA_IN |
1936 	    XS_CTL_THAW_PERIPH | XS_CTL_FREEZE_PERIPH;
1937 
1938 	memset(&cmd, 0, sizeof(cmd));
1939 	cmd.opcode = SCSI_REQUEST_SENSE;
1940 	cmd.length = sizeof(struct scsi_sense_data);
1941 
1942 	error = scsipi_command(periph, (void *)&cmd, sizeof(cmd),
1943 	    (void *)&xs->sense.scsi_sense, sizeof(struct scsi_sense_data),
1944 	    0, 1000, NULL, flags);
1945 	periph->periph_flags &= ~PERIPH_SENSE;
1946 	periph->periph_xscheck = NULL;
1947 	switch (error) {
1948 	case 0:
1949 		/* we have a valid sense */
1950 		xs->error = XS_SENSE;
1951 		return;
1952 	case EINTR:
1953 		/* REQUEST_SENSE interrupted by bus reset. */
1954 		xs->error = XS_RESET;
1955 		return;
1956 	case EIO:
1957 		/* request sense couldn't be performed */
1958 		/*
1959 		 * XXX this isn't quite right but we don't have anything
1960 		 * better for now
1961 		 */
1962 		xs->error = XS_DRIVER_STUFFUP;
1963 		return;
1964 	default:
1965 		/* Notify that request sense failed. */
1966 		xs->error = XS_DRIVER_STUFFUP;
1967 		scsipi_printaddr(periph);
1968 		printf("request sense failed with error %d\n", error);
1969 		return;
1970 	}
1971 }
1972 
1973 /*
1974  * scsipi_enqueue:
1975  *
1976  *	Enqueue an xfer on a channel.
1977  */
1978 static int
1979 scsipi_enqueue(struct scsipi_xfer *xs)
1980 {
1981 	struct scsipi_channel *chan = xs->xs_periph->periph_channel;
1982 	struct scsipi_xfer *qxs;
1983 
1984 	/*
1985 	 * If the xfer is to be polled, and there are already jobs on
1986 	 * the queue, we can't proceed.
1987 	 */
1988 	KASSERT(mutex_owned(chan_mtx(chan)));
1989 	if ((xs->xs_control & XS_CTL_POLL) != 0 &&
1990 	    TAILQ_FIRST(&chan->chan_queue) != NULL) {
1991 		xs->error = XS_DRIVER_STUFFUP;
1992 		return EAGAIN;
1993 	}
1994 
1995 	/*
1996 	 * If we have an URGENT xfer, it's an error recovery command
1997 	 * and it should just go on the head of the channel's queue.
1998 	 */
1999 	if (xs->xs_control & XS_CTL_URGENT) {
2000 		TAILQ_INSERT_HEAD(&chan->chan_queue, xs, channel_q);
2001 		goto out;
2002 	}
2003 
2004 	/*
2005 	 * If this xfer has already been on the queue before, we
2006 	 * need to reinsert it in the correct order.  That order is:
2007 	 *
2008 	 *	Immediately before the first xfer for this periph
2009 	 *	with a requeuecnt less than xs->xs_requeuecnt.
2010 	 *
2011 	 * Failing that, at the end of the queue.  (We'll end up
2012 	 * there naturally.)
2013 	 */
2014 	if (xs->xs_requeuecnt != 0) {
2015 		for (qxs = TAILQ_FIRST(&chan->chan_queue); qxs != NULL;
2016 		     qxs = TAILQ_NEXT(qxs, channel_q)) {
2017 			if (qxs->xs_periph == xs->xs_periph &&
2018 			    qxs->xs_requeuecnt < xs->xs_requeuecnt)
2019 				break;
2020 		}
2021 		if (qxs != NULL) {
2022 			TAILQ_INSERT_AFTER(&chan->chan_queue, qxs, xs,
2023 			    channel_q);
2024 			goto out;
2025 		}
2026 	}
2027 	TAILQ_INSERT_TAIL(&chan->chan_queue, xs, channel_q);
2028  out:
2029 	if (xs->xs_control & XS_CTL_THAW_PERIPH)
2030 		scsipi_periph_thaw_locked(xs->xs_periph, 1);
2031 	return 0;
2032 }
2033 
2034 /*
2035  * scsipi_run_queue:
2036  *
2037  *	Start as many xfers as possible running on the channel.
2038  */
2039 static void
2040 scsipi_run_queue(struct scsipi_channel *chan)
2041 {
2042 	struct scsipi_xfer *xs;
2043 	struct scsipi_periph *periph;
2044 
2045 	for (;;) {
2046 		mutex_enter(chan_mtx(chan));
2047 
2048 		/*
2049 		 * If the channel is frozen, we can't do any work right
2050 		 * now.
2051 		 */
2052 		if (chan->chan_qfreeze != 0) {
2053 			mutex_exit(chan_mtx(chan));
2054 			return;
2055 		}
2056 
2057 		/*
2058 		 * Look for work to do, and make sure we can do it.
2059 		 */
2060 		for (xs = TAILQ_FIRST(&chan->chan_queue); xs != NULL;
2061 		     xs = TAILQ_NEXT(xs, channel_q)) {
2062 			periph = xs->xs_periph;
2063 
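			/*
			 * Skip xfers whose periph already has a full command
			 * window, is frozen, or has an untagged command
			 * outstanding (PERIPH_UNTAG).
			 */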
2064 			if ((periph->periph_sent >= periph->periph_openings) ||
2065 			    periph->periph_qfreeze != 0 ||
2066 			    (periph->periph_flags & PERIPH_UNTAG) != 0)
2067 				continue;
2068 
2069 			if ((periph->periph_flags &
2070 			    (PERIPH_RECOVERING | PERIPH_SENSE)) != 0 &&
2071 			    (xs->xs_control & XS_CTL_URGENT) == 0)
2072 				continue;
2073 
2074 			/*
2075 			 * We can issue this xfer!
2076 			 */
2077 			goto got_one;
2078 		}
2079 
2080 		/*
2081 		 * Can't find any work to do right now.
2082 		 */
2083 		mutex_exit(chan_mtx(chan));
2084 		return;
2085 
2086  got_one:
2087 		/*
2088 		 * Have an xfer to run.  Allocate a resource from
2089 		 * the adapter to run it.  If we can't allocate that
2090 		 * resource, we don't dequeue the xfer.
2091 		 */
2092 		if (scsipi_get_resource(chan) == 0) {
2093 			/*
2094 			 * Adapter is out of resources.  If the adapter
2095 			 * supports it, attempt to grow them.
2096 			 */
2097 			if (scsipi_grow_resources(chan) == 0) {
2098 				/*
2099 				 * Wasn't able to grow resources,
2100 				 * nothing more we can do.
2101 				 */
2102 				if (xs->xs_control & XS_CTL_POLL) {
2103 					scsipi_printaddr(xs->xs_periph);
2104 					printf("polling command but no "
2105 					    "adapter resources\n");
2106 					/* We'll panic shortly... */
2107 				}
2108 				mutex_exit(chan_mtx(chan));
2109 
2110 				/*
2111 				 * XXX: We should be able to note that
2112 				 * XXX: resources are needed here!
2113 				 */
2114 				return;
2115 			}
2116 			/*
2117 			 * scsipi_grow_resources() allocated the resource
2118 			 * for us.
2119 			 */
2120 		}
2121 
2122 		/*
2123 		 * We have a resource to run this xfer, do it!
2124 		 */
2125 		TAILQ_REMOVE(&chan->chan_queue, xs, channel_q);
2126 
2127 		/*
2128 		 * If the command is to be tagged, allocate a tag ID
2129 		 * for it.
2130 		 */
2131 		if (XS_CTL_TAGTYPE(xs) != 0)
2132 			scsipi_get_tag(xs);
2133 		else
2134 			periph->periph_flags |= PERIPH_UNTAG;
2135 		periph->periph_sent++;
2136 		mutex_exit(chan_mtx(chan));
2137 
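		/* Hand the xfer to the adapter; the channel lock is not held. */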
2138 		scsipi_adapter_request(chan, ADAPTER_REQ_RUN_XFER, xs);
2139 	}
2140 #ifdef DIAGNOSTIC
2141 	panic("scsipi_run_queue: impossible");
2142 #endif
2143 }
2144 
2145 /*
2146  * scsipi_execute_xs:
2147  *
2148  *	Begin execution of an xfer, waiting for it to complete, if necessary.
2149  */
2150 int
2151 scsipi_execute_xs(struct scsipi_xfer *xs)
2152 {
2153 	struct scsipi_periph *periph = xs->xs_periph;
2154 	struct scsipi_channel *chan = periph->periph_channel;
2155 	int oasync, async, poll, error;
2156 
2157 	KASSERT(!cold);
2158 
2159 	scsipi_update_timeouts(xs);
2160 
2161 	(chan->chan_bustype->bustype_cmd)(xs);
2162 
2163 	xs->xs_status &= ~XS_STS_DONE;
2164 	xs->error = XS_NOERROR;
2165 	xs->resid = xs->datalen;
2166 	xs->status = SCSI_OK;
2167 
2168 #ifdef SCSIPI_DEBUG
2169 	if (xs->xs_periph->periph_dbflags & SCSIPI_DB3) {
2170 		printf("scsipi_execute_xs: ");
2171 		show_scsipi_xs(xs);
2172 		printf("\n");
2173 	}
2174 #endif
2175 
2176 	/*
2177 	 * Deal with command tagging:
2178 	 *
2179 	 *	- If the device's current operating mode doesn't
2180 	 *	  include tagged queueing, clear the tag mask.
2181 	 *
2182 	 *	- If the device's current operating mode *does*
2183 	 *	  include tagged queueing, set the tag_type in
2184 	 *	  the xfer to the appropriate byte for the tag
2185 	 *	  message.
2186 	 */
2187 	if ((PERIPH_XFER_MODE(periph) & PERIPH_CAP_TQING) == 0 ||
2188 	    (xs->xs_control & XS_CTL_REQSENSE)) {
2189 		xs->xs_control &= ~XS_CTL_TAGMASK;
2190 		xs->xs_tag_type = 0;
2191 	} else {
2192 		/*
2193 		 * If the request doesn't specify a tag, give Head
2194 		 * tags to URGENT operations and Simple tags to
2195 		 * everything else.
2196 		 */
2197 		if (XS_CTL_TAGTYPE(xs) == 0) {
2198 			if (xs->xs_control & XS_CTL_URGENT)
2199 				xs->xs_control |= XS_CTL_HEAD_TAG;
2200 			else
2201 				xs->xs_control |= XS_CTL_SIMPLE_TAG;
2202 		}
2203 
2204 		switch (XS_CTL_TAGTYPE(xs)) {
2205 		case XS_CTL_ORDERED_TAG:
2206 			xs->xs_tag_type = MSG_ORDERED_Q_TAG;
2207 			break;
2208 
2209 		case XS_CTL_SIMPLE_TAG:
2210 			xs->xs_tag_type = MSG_SIMPLE_Q_TAG;
2211 			break;
2212 
2213 		case XS_CTL_HEAD_TAG:
2214 			xs->xs_tag_type = MSG_HEAD_OF_Q_TAG;
2215 			break;
2216 
2217 		default:
2218 			scsipi_printaddr(periph);
2219 			printf("invalid tag mask 0x%08x\n",
2220 			    XS_CTL_TAGTYPE(xs));
2221 			panic("scsipi_execute_xs");
2222 		}
2223 	}
2224 
2225 	/* If the adapter wants us to poll, poll. */
2226 	if (chan->chan_adapter->adapt_flags & SCSIPI_ADAPT_POLL_ONLY)
2227 		xs->xs_control |= XS_CTL_POLL;
2228 
2229 	/*
2230 	 * If we don't yet have a completion thread, or we are to poll for
2231 	 * completion, clear the ASYNC flag.
2232 	 */
2233 	oasync = (xs->xs_control & XS_CTL_ASYNC);
2234 	if (chan->chan_thread == NULL || (xs->xs_control & XS_CTL_POLL) != 0)
2235 		xs->xs_control &= ~XS_CTL_ASYNC;
2236 
2237 	async = (xs->xs_control & XS_CTL_ASYNC);
2238 	poll = (xs->xs_control & XS_CTL_POLL);
2239 
2240 #ifdef DIAGNOSTIC
2241 	if (oasync != 0 && xs->bp == NULL)
2242 		panic("scsipi_execute_xs: XS_CTL_ASYNC but no buf");
2243 #endif
2244 
2245 	/*
2246 	 * Enqueue the transfer.  If we're not polling for completion, this
2247 	 * should ALWAYS return `no error'.
2248 	 */
2249 	error = scsipi_enqueue(xs);
2250 	if (error) {
2251 		if (poll == 0) {
2252 			scsipi_printaddr(periph);
2253 			printf("not polling, but enqueue failed with %d\n",
2254 			    error);
2255 			panic("scsipi_execute_xs");
2256 		}
2257 
2258 		scsipi_printaddr(periph);
2259 		printf("should have flushed queue?\n");
2260 		goto free_xs;
2261 	}
2262 
2263 	mutex_exit(chan_mtx(chan));
2264  restarted:
2265 	scsipi_run_queue(chan);
2266 	mutex_enter(chan_mtx(chan));
2267 
2268 	/*
2269 	 * The xfer is enqueued, and possibly running.  If it's to be
2270 	 * completed asynchronously, just return now.
2271 	 */
2272 	if (async)
2273 		return 0;
2274 
2275 	/*
2276 	 * Not an asynchronous command; wait for it to complete.
2277 	 */
2278 	while ((xs->xs_status & XS_STS_DONE) == 0) {
2279 		if (poll) {
2280 			scsipi_printaddr(periph);
2281 			printf("polling command not done\n");
2282 			panic("scsipi_execute_xs");
2283 		}
2284 		cv_wait(xs_cv(xs), chan_mtx(chan));
2285 	}
2286 
2287 	/*
2288 	 * Command is complete.  scsipi_done() has awakened us to perform
2289 	 * the error handling.
2290 	 */
2291 	mutex_exit(chan_mtx(chan));
2292 	error = scsipi_complete(xs);
2293 	if (error == ERESTART)
2294 		goto restarted;
2295 
2296 	/*
2297 	 * If it was meant to run async and we cleared async ourselves,
2298 	 * don't return an error here; it has already been handled.
2299 	 */
2300 	if (oasync)
2301 		error = 0;
2302 	/*
2303 	 * Command completed successfully or a fatal error occurred.
2304 	 * Fall into the cleanup at free_xs below.
2305 	 */
2306 	mutex_enter(chan_mtx(chan));
2307  free_xs:
2308 	scsipi_put_xs(xs);
2309 	mutex_exit(chan_mtx(chan));
2310 
2311 	/*
2312 	 * Kick the queue, keep it running in case it stopped for some
2313 	 * reason.
2314 	 */
2315 	scsipi_run_queue(chan);
2316 
2317 	mutex_enter(chan_mtx(chan));
2318 	return error;
2319 }
2320 
2321 /*
2322  * scsipi_completion_thread:
2323  *
2324  *	This is the completion thread.  We wait for errors on
2325  *	asynchronous xfers, and perform the error handling
2326  *	function, restarting the command, if necessary.
2327  */
2328 static void
2329 scsipi_completion_thread(void *arg)
2330 {
2331 	struct scsipi_channel *chan = arg;
2332 	struct scsipi_xfer *xs;
2333 
2334 	if (chan->chan_init_cb)
2335 		(*chan->chan_init_cb)(chan, chan->chan_init_cb_arg);
2336 
2337 	mutex_enter(chan_mtx(chan));
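	/*
	 * Announce that the completion thread is running.  With
	 * SCSIPI_CHAN_TACTIVE set, scsipi_complete() can use the periph
	 * callout for XS_BUSY back-off instead of sleeping in kpause().
	 */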
2338 	chan->chan_flags |= SCSIPI_CHAN_TACTIVE;
2339 	for (;;) {
2340 		xs = TAILQ_FIRST(&chan->chan_complete);
2341 		if (xs == NULL && chan->chan_tflags == 0) {
2342 			/* nothing to do; wait */
2343 			cv_wait(chan_cv_complete(chan), chan_mtx(chan));
2344 			continue;
2345 		}
2346 		if (chan->chan_tflags & SCSIPI_CHANT_CALLBACK) {
2347 			/* call chan_callback from thread context */
2348 			chan->chan_tflags &= ~SCSIPI_CHANT_CALLBACK;
2349 			chan->chan_callback(chan, chan->chan_callback_arg);
2350 			continue;
2351 		}
2352 		if (chan->chan_tflags & SCSIPI_CHANT_GROWRES) {
2353 			/* attempt to get more openings for this channel */
2354 			chan->chan_tflags &= ~SCSIPI_CHANT_GROWRES;
2355 			mutex_exit(chan_mtx(chan));
2356 			scsipi_adapter_request(chan,
2357 			    ADAPTER_REQ_GROW_RESOURCES, NULL);
2358 			scsipi_channel_thaw(chan, 1);
2359 			if (chan->chan_tflags & SCSIPI_CHANT_GROWRES)
2360 				kpause("scsizzz", false, hz/10, NULL);
2361 			mutex_enter(chan_mtx(chan));
2362 			continue;
2363 		}
2364 		if (chan->chan_tflags & SCSIPI_CHANT_KICK) {
2365 			/* explicitly run the queues for this channel */
2366 			chan->chan_tflags &= ~SCSIPI_CHANT_KICK;
2367 			mutex_exit(chan_mtx(chan));
2368 			scsipi_run_queue(chan);
2369 			mutex_enter(chan_mtx(chan));
2370 			continue;
2371 		}
2372 		if (chan->chan_tflags & SCSIPI_CHANT_SHUTDOWN) {
2373 			break;
2374 		}
2375 		if (xs) {
2376 			TAILQ_REMOVE(&chan->chan_complete, xs, channel_q);
2377 			mutex_exit(chan_mtx(chan));
2378 
2379 			/*
2380 			 * Have an xfer with an error; process it.
2381 			 */
2382 			(void) scsipi_complete(xs);
2383 
2384 			/*
2385 			 * Kick the queue; keep it running if it was stopped
2386 			 * for some reason.
2387 			 */
2388 			scsipi_run_queue(chan);
2389 			mutex_enter(chan_mtx(chan));
2390 		}
2391 	}
2392 
2393 	chan->chan_thread = NULL;
2394 
2395 	/* In case parent is waiting for us to exit. */
2396 	cv_broadcast(chan_cv_thread(chan));
2397 	mutex_exit(chan_mtx(chan));
2398 
2399 	kthread_exit(0);
2400 }

2401 /*
2402  * scsipi_thread_call_callback:
2403  *
2404  *	Request to call a callback from the completion thread.
2405  */
2406 int
2407 scsipi_thread_call_callback(struct scsipi_channel *chan,
2408     void (*callback)(struct scsipi_channel *, void *), void *arg)
2409 {
2410 
2411 	mutex_enter(chan_mtx(chan));
2412 	if ((chan->chan_flags & SCSIPI_CHAN_TACTIVE) == 0) {
2413 		/* kernel thread doesn't exist yet */
2414 		mutex_exit(chan_mtx(chan));
2415 		return ESRCH;
2416 	}
2417 	if (chan->chan_tflags & SCSIPI_CHANT_CALLBACK) {
2418 		mutex_exit(chan_mtx(chan));
2419 		return EBUSY;
2420 	}
2421 	scsipi_channel_freeze(chan, 1);
2422 	chan->chan_callback = callback;
2423 	chan->chan_callback_arg = arg;
2424 	chan->chan_tflags |= SCSIPI_CHANT_CALLBACK;
2425 	cv_broadcast(chan_cv_complete(chan));
2426 	mutex_exit(chan_mtx(chan));
2427 	return 0;
2428 }
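
/*
 * Illustrative (hypothetical) driver usage of the above; the callback
 * and softc names are invented.  The channel stays frozen by one while
 * the request is pending, and the completion thread does not thaw it,
 * so the callback is expected to call scsipi_channel_thaw() itself:
 *
 *	static void
 *	foo_reset_callback(struct scsipi_channel *chan, void *arg)
 *	{
 *		struct foo_softc *sc = arg;	(hypothetical softc)
 *
 *		foo_hw_reset(sc);		(hypothetical reset)
 *		scsipi_channel_thaw(chan, 1);
 *	}
 *
 *	error = scsipi_thread_call_callback(chan, foo_reset_callback, sc);
 */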
2429 
2430 /*
2431  * scsipi_async_event:
2432  *
2433  *	Handle an asynchronous event from an adapter.
2434  */
2435 void
2436 scsipi_async_event(struct scsipi_channel *chan, scsipi_async_event_t event,
2437     void *arg)
2438 {
2439 	bool lock = chan_running(chan) > 0;
2440 
2441 	if (lock)
2442 		mutex_enter(chan_mtx(chan));
2443 	switch (event) {
2444 	case ASYNC_EVENT_MAX_OPENINGS:
2445 		scsipi_async_event_max_openings(chan,
2446 		    (struct scsipi_max_openings *)arg);
2447 		break;
2448 
2449 	case ASYNC_EVENT_XFER_MODE:
2450 		if (chan->chan_bustype->bustype_async_event_xfer_mode) {
2451 			chan->chan_bustype->bustype_async_event_xfer_mode(
2452 			    chan, arg);
2453 		}
2454 		break;
2455 	case ASYNC_EVENT_RESET:
2456 		scsipi_async_event_channel_reset(chan);
2457 		break;
2458 	}
2459 	if (lock)
2460 		mutex_exit(chan_mtx(chan));
2461 }
2462 
2463 /*
2464  * scsipi_async_event_max_openings:
2465  *
2466  *	Update the maximum number of outstanding commands a
2467  *	device may have.
2468  */
2469 static void
2470 scsipi_async_event_max_openings(struct scsipi_channel *chan,
2471     struct scsipi_max_openings *mo)
2472 {
2473 	struct scsipi_periph *periph;
2474 	int minlun, maxlun;
2475 
2476 	if (mo->mo_lun == -1) {
2477 		/*
2478 		 * Wildcarded; apply it to all LUNs.
2479 		 */
2480 		minlun = 0;
2481 		maxlun = chan->chan_nluns - 1;
2482 	} else
2483 		minlun = maxlun = mo->mo_lun;
2484 
2485 	/* XXX This could really suck with a large LUN space. */
2486 	for (; minlun <= maxlun; minlun++) {
2487 		periph = scsipi_lookup_periph_locked(chan, mo->mo_target, minlun);
2488 		if (periph == NULL)
2489 			continue;
2490 
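		/*
		 * Openings may always be reduced, but may only grow if the
		 * periph has opted in with PERIPH_GROW_OPENINGS.
		 */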
2491 		if (mo->mo_openings < periph->periph_openings)
2492 			periph->periph_openings = mo->mo_openings;
2493 		else if (mo->mo_openings > periph->periph_openings &&
2494 		    (periph->periph_flags & PERIPH_GROW_OPENINGS) != 0)
2495 			periph->periph_openings = mo->mo_openings;
2496 	}
2497 }
2498 
2499 /*
2500  * scsipi_set_xfer_mode:
2501  *
2502  *	Set the xfer mode for the specified I_T Nexus.
2503  */
2504 void
2505 scsipi_set_xfer_mode(struct scsipi_channel *chan, int target, int immed)
2506 {
2507 	struct scsipi_xfer_mode xm;
2508 	struct scsipi_periph *itperiph;
2509 	int lun;
2510 
2511 	/*
2512 	 * Go to the minimal xfer mode.
2513 	 */
2514 	xm.xm_target = target;
2515 	xm.xm_mode = 0;
2516 	xm.xm_period = 0;			/* ignored */
2517 	xm.xm_offset = 0;			/* ignored */
2518 
2519 	/*
2520 	 * Find the first LUN we know about on this I_T Nexus.
2521 	 */
2522 	for (itperiph = NULL, lun = 0; lun < chan->chan_nluns; lun++) {
2523 		itperiph = scsipi_lookup_periph(chan, target, lun);
2524 		if (itperiph != NULL)
2525 			break;
2526 	}
2527 	if (itperiph != NULL) {
2528 		xm.xm_mode = itperiph->periph_cap;
2529 		/*
2530 		 * Now issue the request to the adapter.
2531 		 */
2532 		scsipi_adapter_request(chan, ADAPTER_REQ_SET_XFER_MODE, &xm);
2533 		/*
2534 		 * If we want this to happen immediately, issue a dummy
2535 		 * command, since most adapters can't really negotiate unless
2536 		 * they're executing a job.
2537 		 */
2538 		if (immed != 0) {
2539 			(void) scsipi_test_unit_ready(itperiph,
2540 			    XS_CTL_DISCOVERY | XS_CTL_IGNORE_ILLEGAL_REQUEST |
2541 			    XS_CTL_IGNORE_NOT_READY |
2542 			    XS_CTL_IGNORE_MEDIA_CHANGE);
2543 		}
2544 	}
2545 }
2546 
2547 /*
2548  * scsipi_async_event_channel_reset:
2549  *
2550  *	Handle a SCSI bus reset.  Called with the channel lock held.
2551  */
2553 static void
2554 scsipi_async_event_channel_reset(struct scsipi_channel *chan)
2555 {
2556 	struct scsipi_xfer *xs, *xs_next;
2557 	struct scsipi_periph *periph;
2558 	int target, lun;
2559 
2560 	/*
2561 	 * The channel has been reset.  Also mark pending REQUEST_SENSE
2562 	 * commands as reset, since their sense data is no longer available.
2563 	 * We can't call scsipi_done() from here, as the commands have not
2564 	 * been sent to the adapter yet (that would corrupt the accounting).
2565 	 */
2566 
2567 	for (xs = TAILQ_FIRST(&chan->chan_queue); xs != NULL; xs = xs_next) {
2568 		xs_next = TAILQ_NEXT(xs, channel_q);
2569 		if (xs->xs_control & XS_CTL_REQSENSE) {
2570 			TAILQ_REMOVE(&chan->chan_queue, xs, channel_q);
2571 			xs->error = XS_RESET;
2572 			if ((xs->xs_control & XS_CTL_ASYNC) != 0)
2573 				TAILQ_INSERT_TAIL(&chan->chan_complete, xs,
2574 				    channel_q);
2575 		}
2576 	}
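	/* Wake the completion thread to process the aborted REQSENSE xfers. */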
2577 	cv_broadcast(chan_cv_complete(chan));
2578 	/* Catch xs with pending sense which may not have a REQSENSE xs yet */
2579 	/* Catch xfers with pending sense that don't have a REQSENSE xfer queued yet. */
2580 		if (target == chan->chan_id)
2581 			continue;
2582 		for (lun = 0; lun < chan->chan_nluns; lun++) {
2583 			periph = scsipi_lookup_periph_locked(chan, target, lun);
2584 			if (periph) {
2585 				xs = periph->periph_xscheck;
2586 				if (xs)
2587 					xs->error = XS_RESET;
2588 			}
2589 		}
2590 	}
2591 }
2592 
2593 /*
2594  * scsipi_target_detach:
2595  *
2596  *	Detach all periphs associated with an I_T nexus.
2597  *	Must be called from valid thread context.
2598  */
2599 int
2600 scsipi_target_detach(struct scsipi_channel *chan, int target, int lun,
2601     int flags)
2602 {
2603 	struct scsipi_periph *periph;
2604 	device_t tdev;
2605 	int ctarget, mintarget, maxtarget;
2606 	int clun, minlun, maxlun;
2607 	int error = 0;
2608 
2609 	if (target == -1) {
2610 		mintarget = 0;
2611 		maxtarget = chan->chan_ntargets;
2612 	} else {
2613 		if (target == chan->chan_id)
2614 			return EINVAL;
2615 		if (target < 0 || target >= chan->chan_ntargets)
2616 			return EINVAL;
2617 		mintarget = target;
2618 		maxtarget = target + 1;
2619 	}
2620 
2621 	if (lun == -1) {
2622 		minlun = 0;
2623 		maxlun = chan->chan_nluns;
2624 	} else {
2625 		if (lun < 0 || lun >= chan->chan_nluns)
2626 			return EINVAL;
2627 		minlun = lun;
2628 		maxlun = lun + 1;
2629 	}
2630 
2631 	/* for config_detach */
2632 	KERNEL_LOCK(1, curlwp);
2633 
2634 	mutex_enter(chan_mtx(chan));
2635 	for (ctarget = mintarget; ctarget < maxtarget; ctarget++) {
2636 		if (ctarget == chan->chan_id)
2637 			continue;
2638 
2639 		for (clun = minlun; clun < maxlun; clun++) {
2640 			periph = scsipi_lookup_periph_locked(chan, ctarget, clun);
2641 			if (periph == NULL)
2642 				continue;
2643 			tdev = periph->periph_dev;
2644 			mutex_exit(chan_mtx(chan));
2645 			error = config_detach(tdev, flags);
2646 			if (error)
2647 				goto out;
2648 			mutex_enter(chan_mtx(chan));
2649 			KASSERT(scsipi_lookup_periph_locked(chan, ctarget, clun) == NULL);
2650 		}
2651 	}
2652 	mutex_exit(chan_mtx(chan));
2653 
2654 out:
2655 	KERNEL_UNLOCK_ONE(curlwp);
2656 
2657 	return error;
2658 }
2659 
2660 /*
2661  * scsipi_adapter_addref:
2662  *
2663  *	Add a reference to the adapter pointed to by the provided
2664  *	link, enabling the adapter if necessary.
2665  */
2666 int
2667 scsipi_adapter_addref(struct scsipi_adapter *adapt)
2668 {
2669 	int error = 0;
2670 
2671 	if (atomic_inc_uint_nv(&adapt->adapt_refcnt) == 1
2672 	    && adapt->adapt_enable != NULL) {
2673 		scsipi_adapter_lock(adapt);
2674 		error = scsipi_adapter_enable(adapt, 1);
2675 		scsipi_adapter_unlock(adapt);
2676 		if (error)
2677 			atomic_dec_uint(&adapt->adapt_refcnt);
2678 	}
2679 	return error;
2680 }
2681 
2682 /*
2683  * scsipi_adapter_delref:
2684  *
2685  *	Delete a reference to the adapter pointed to by the provided
2686  *	link, disabling the adapter if possible.
2687  */
2688 void
2689 scsipi_adapter_delref(struct scsipi_adapter *adapt)
2690 {
2691 
2692 	if (atomic_dec_uint_nv(&adapt->adapt_refcnt) == 0
2693 	    && adapt->adapt_enable != NULL) {
2694 		scsipi_adapter_lock(adapt);
2695 		(void) scsipi_adapter_enable(adapt, 0);
2696 		scsipi_adapter_unlock(adapt);
2697 	}
2698 }
2699 
2700 static struct scsipi_syncparam {
2701 	int	ss_factor;
2702 	int	ss_period;	/* ns * 100 */
2703 } scsipi_syncparams[] = {
2704 	{ 0x08,		 625 },	/* FAST-160 (Ultra320) */
2705 	{ 0x09,		1250 },	/* FAST-80 (Ultra160) */
2706 	{ 0x0a,		2500 },	/* FAST-40 40MHz (Ultra2) */
2707 	{ 0x0b,		3030 },	/* FAST-40 33MHz (Ultra2) */
2708 	{ 0x0c,		5000 },	/* FAST-20 (Ultra) */
2709 };
2710 static const int scsipi_nsyncparams =
2711     sizeof(scsipi_syncparams) / sizeof(scsipi_syncparams[0]);
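
/*
 * The helpers below convert between SPI sync factors, periods (in units
 * of ns * 100) and frequencies (in kHz).  Factors 0x08-0x0c come from
 * the table above; slower settings use the classic formula
 * period_ns = factor * 4.  Worked example: factor 0x0c (FAST-20) maps
 * to a period of 5000 (50 ns) and hence a frequency of
 * 100000000 / 5000 = 20000 kHz, i.e. 20 MHz.
 */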
2712 
2713 int
2714 scsipi_sync_period_to_factor(int period /* ns * 100 */)
2715 {
2716 	int i;
2717 
2718 	for (i = 0; i < scsipi_nsyncparams; i++) {
2719 		if (period <= scsipi_syncparams[i].ss_period)
2720 			return scsipi_syncparams[i].ss_factor;
2721 	}
2722 
2723 	return (period / 100) / 4;
2724 }
2725 
2726 int
2727 scsipi_sync_factor_to_period(int factor)
2728 {
2729 	int i;
2730 
2731 	for (i = 0; i < scsipi_nsyncparams; i++) {
2732 		if (factor == scsipi_syncparams[i].ss_factor)
2733 			return scsipi_syncparams[i].ss_period;
2734 	}
2735 
2736 	return (factor * 4) * 100;
2737 }
2738 
2739 int
2740 scsipi_sync_factor_to_freq(int factor)
2741 {
2742 	int i;
2743 
2744 	for (i = 0; i < scsipi_nsyncparams; i++) {
2745 		if (factor == scsipi_syncparams[i].ss_factor)
2746 			return 100000000 / scsipi_syncparams[i].ss_period;
2747 	}
2748 
2749 	return 10000000 / ((factor * 4) * 10);
2750 }
2751 
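/*
 * Adapters that are not marked SCSIPI_ADAPT_MPSAFE are serialized with
 * the kernel big lock; take and release it around calls into the adapter.
 */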
2752 static inline void
2753 scsipi_adapter_lock(struct scsipi_adapter *adapt)
2754 {
2755 
2756 	if ((adapt->adapt_flags & SCSIPI_ADAPT_MPSAFE) == 0)
2757 		KERNEL_LOCK(1, NULL);
2758 }
2759 
2760 static inline void
2761 scsipi_adapter_unlock(struct scsipi_adapter *adapt)
2762 {
2763 
2764 	if ((adapt->adapt_flags & SCSIPI_ADAPT_MPSAFE) == 0)
2765 		KERNEL_UNLOCK_ONE(NULL);
2766 }
2767 
2768 void
2769 scsipi_adapter_minphys(struct scsipi_channel *chan, struct buf *bp)
2770 {
2771 	struct scsipi_adapter *adapt = chan->chan_adapter;
2772 
2773 	scsipi_adapter_lock(adapt);
2774 	(adapt->adapt_minphys)(bp);
2775 	scsipi_adapter_unlock(adapt);
2776 }
2777 
2778 void
2779 scsipi_adapter_request(struct scsipi_channel *chan,
2780 	scsipi_adapter_req_t req, void *arg)
2782 {
2783 	struct scsipi_adapter *adapt = chan->chan_adapter;
2784 
2785 	scsipi_adapter_lock(adapt);
2786 	(adapt->adapt_request)(chan, req, arg);
2787 	scsipi_adapter_unlock(adapt);
2788 }
2789 
2790 int
2791 scsipi_adapter_ioctl(struct scsipi_channel *chan, u_long cmd,
2792 	void *data, int flag, struct proc *p)
2793 {
2794 	struct scsipi_adapter *adapt = chan->chan_adapter;
2795 	int error;
2796 
2797 	if (adapt->adapt_ioctl == NULL)
2798 		return ENOTTY;
2799 
2800 	scsipi_adapter_lock(adapt);
2801 	error = (adapt->adapt_ioctl)(chan, cmd, data, flag, p);
2802 	scsipi_adapter_unlock(adapt);
2803 	return error;
2804 }
2805 
2806 int
2807 scsipi_adapter_enable(struct scsipi_adapter *adapt, int enable)
2808 {
2809 	int error;
2810 
2811 	scsipi_adapter_lock(adapt);
2812 	error = (adapt->adapt_enable)(adapt->adapt_dev, enable);
2813 	scsipi_adapter_unlock(adapt);
2814 	return error;
2815 }
2816 
2817 #ifdef SCSIPI_DEBUG
2818 /*
2819  * Given a scsipi_xfer, dump the request, in all its glory
2820  */
2821 void
2822 show_scsipi_xs(struct scsipi_xfer *xs)
2823 {
2824 
2825 	printf("xs(%p): ", xs);
2826 	printf("xs_control(0x%08x)", xs->xs_control);
2827 	printf("xs_status(0x%08x)", xs->xs_status);
2828 	printf("periph(%p)", xs->xs_periph);
2829 	printf("retr(0x%x)", xs->xs_retries);
2830 	printf("timo(0x%x)", xs->timeout);
2831 	printf("cmd(%p)", xs->cmd);
2832 	printf("len(0x%x)", xs->cmdlen);
2833 	printf("data(%p)", xs->data);
2834 	printf("len(0x%x)", xs->datalen);
2835 	printf("res(0x%x)", xs->resid);
2836 	printf("err(0x%x)", xs->error);
2837 	printf("bp(%p)", xs->bp);
2838 	show_scsipi_cmd(xs);
2839 }
2840 
2841 void
2842 show_scsipi_cmd(struct scsipi_xfer *xs)
2843 {
2844 	u_char *b = (u_char *) xs->cmd;
2845 	int i = 0;
2846 
2847 	scsipi_printaddr(xs->xs_periph);
2848 	printf(" command: ");
2849 
2850 	if ((xs->xs_control & XS_CTL_RESET) == 0) {
2851 		while (i < xs->cmdlen) {
2852 			if (i)
2853 				printf(",");
2854 			printf("0x%x", b[i++]);
2855 		}
2856 		printf("-[%d bytes]\n", xs->datalen);
2857 		if (xs->datalen)
2858 			show_mem(xs->data, uimin(64, xs->datalen));
2859 	} else
2860 		printf("-RESET-\n");
2861 }
2862 
2863 void
2864 show_mem(u_char *address, int num)
2865 {
2866 	int x;
2867 
2868 	printf("------------------------------");
2869 	for (x = 0; x < num; x++) {
2870 		if ((x % 16) == 0)
2871 			printf("\n%03d: ", x);
2872 		printf("%02x ", *address++);
2873 	}
2874 	printf("\n------------------------------\n");
2875 }
2876 #endif /* SCSIPI_DEBUG */
2877