xref: /netbsd-src/sys/dev/ata/ata.c (revision 796c32c94f6e154afc9de0f63da35c91bb739b45)
1 /*	$NetBSD: ata.c,v 1.141 2017/10/28 04:53:54 riastradh Exp $	*/
2 
3 /*
4  * Copyright (c) 1998, 2001 Manuel Bouyer.  All rights reserved.
5  *
6  * Redistribution and use in source and binary forms, with or without
7  * modification, are permitted provided that the following conditions
8  * are met:
9  * 1. Redistributions of source code must retain the above copyright
10  *    notice, this list of conditions and the following disclaimer.
11  * 2. Redistributions in binary form must reproduce the above copyright
12  *    notice, this list of conditions and the following disclaimer in the
13  *    documentation and/or other materials provided with the distribution.
14  *
15  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
16  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
17  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
18  * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
19  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
20  * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
21  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
22  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
23  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
24  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
25  */
26 
27 #include <sys/cdefs.h>
28 __KERNEL_RCSID(0, "$NetBSD: ata.c,v 1.141 2017/10/28 04:53:54 riastradh Exp $");
29 
30 #include "opt_ata.h"
31 
32 #include <sys/param.h>
33 #include <sys/systm.h>
34 #include <sys/kernel.h>
35 #include <sys/malloc.h>
36 #include <sys/device.h>
37 #include <sys/conf.h>
38 #include <sys/fcntl.h>
39 #include <sys/proc.h>
40 #include <sys/kthread.h>
41 #include <sys/errno.h>
42 #include <sys/ataio.h>
43 #include <sys/kmem.h>
44 #include <sys/intr.h>
45 #include <sys/bus.h>
46 #include <sys/once.h>
47 #include <sys/bitops.h>
48 
49 #define ATABUS_PRIVATE
50 
51 #include <dev/ata/ataconf.h>
52 #include <dev/ata/atareg.h>
53 #include <dev/ata/atavar.h>
54 #include <dev/ic/wdcvar.h>	/* for PIOBM */
55 
56 #include "ioconf.h"
57 #include "locators.h"
58 
59 #include "atapibus.h"
60 #include "ataraid.h"
61 #include "sata_pmp.h"
62 
63 #if NATARAID > 0
64 #include <dev/ata/ata_raidvar.h>
65 #endif
66 #if NSATA_PMP > 0
67 #include <dev/ata/satapmpvar.h>
68 #endif
69 #include <dev/ata/satapmpreg.h>
70 
71 #define DEBUG_FUNCS  0x08
72 #define DEBUG_PROBE  0x10
73 #define DEBUG_DETACH 0x20
74 #define	DEBUG_XFERS  0x40
75 #ifdef ATADEBUG
76 #ifndef ATADEBUG_MASK
77 #define ATADEBUG_MASK 0
78 #endif
79 int atadebug_mask = ATADEBUG_MASK;
80 #define ATADEBUG_PRINT(args, level) \
81 	if (atadebug_mask & (level)) \
82 		printf args
83 #else
84 #define ATADEBUG_PRINT(args, level)
85 #endif
86 
87 static ONCE_DECL(ata_init_ctrl);
88 
/*
 * A queue of atabus instances, used to ensure the same bus probe order
 * for a given hardware configuration at each boot.  A kthread probes
 * the devices on each atabus; only one probe runs at a time.
 */
94 static TAILQ_HEAD(, atabus_initq)	atabus_initq_head;
95 static kmutex_t				atabus_qlock;
96 static kcondvar_t			atabus_qcv;
97 static lwp_t *				atabus_cfg_lwp;
98 
99 /*****************************************************************************
100  * ATA bus layer.
101  *
102  * ATA controllers attach an atabus instance, which handles probing the bus
103  * for drives, etc.
104  *****************************************************************************/
105 
106 dev_type_open(atabusopen);
107 dev_type_close(atabusclose);
108 dev_type_ioctl(atabusioctl);
109 
/*
 * Character-device switch for /dev/atabus* control nodes.  Only open,
 * close and ioctl are meaningful; all other entry points are the
 * standard no-op stubs.
 */
const struct cdevsw atabus_cdevsw = {
	.d_open = atabusopen,
	.d_close = atabusclose,
	.d_read = noread,
	.d_write = nowrite,
	.d_ioctl = atabusioctl,
	.d_stop = nostop,
	.d_tty = notty,
	.d_poll = nopoll,
	.d_mmap = nommap,
	.d_kqfilter = nokqfilter,
	.d_discard = nodiscard,
	.d_flag = D_OTHER
};
124 
125 static void atabus_childdetached(device_t, device_t);
126 static int atabus_rescan(device_t, const char *, const int *);
127 static bool atabus_resume(device_t, const pmf_qual_t *);
128 static bool atabus_suspend(device_t, const pmf_qual_t *);
129 static void atabusconfig_thread(void *);
130 
131 static void ata_channel_idle(struct ata_channel *);
132 static void ata_channel_thaw_locked(struct ata_channel *);
133 static void ata_activate_xfer_locked(struct ata_channel *, struct ata_xfer *);
134 static void ata_channel_freeze_locked(struct ata_channel *);
135 static void ata_thread_wake_locked(struct ata_channel *);
136 
137 /*
138  * atabus_init:
139  *
140  *	Initialize ATA subsystem structures.
141  */
static int
atabus_init(void)
{

	/*
	 * Set up the global probe-ordering queue and its lock/condvar.
	 * Invoked exactly once via RUN_ONCE() from atabus_attach().
	 * Always succeeds; the int return satisfies the once(9) API.
	 */
	TAILQ_INIT(&atabus_initq_head);
	mutex_init(&atabus_qlock, MUTEX_DEFAULT, IPL_NONE);
	cv_init(&atabus_qcv, "atainitq");
	return 0;
}
151 
152 /*
153  * atabusprint:
154  *
155  *	Autoconfiguration print routine used by ATA controllers when
156  *	attaching an atabus instance.
157  */
158 int
159 atabusprint(void *aux, const char *pnp)
160 {
161 	struct ata_channel *chan = aux;
162 
163 	if (pnp)
164 		aprint_normal("atabus at %s", pnp);
165 	aprint_normal(" channel %d", chan->ch_channel);
166 
167 	return (UNCONF);
168 }
169 
170 /*
171  * ataprint:
172  *
173  *	Autoconfiguration print routine.
174  */
175 int
176 ataprint(void *aux, const char *pnp)
177 {
178 	struct ata_device *adev = aux;
179 
180 	if (pnp)
181 		aprint_normal("wd at %s", pnp);
182 	aprint_normal(" drive %d", adev->adev_drv_data->drive);
183 
184 	return (UNCONF);
185 }
186 
187 /*
188  * ata_channel_attach:
189  *
190  *	Common parts of attaching an atabus to an ATA controller channel.
191  */
void
ata_channel_attach(struct ata_channel *chp)
{
	/* A disabled channel gets no atabus instance at all. */
	if (chp->ch_flags & ATACH_DISABLED)
		return;

	/* Initialize locks/queues before any child can see the channel. */
	ata_channel_init(chp);

	KASSERT(chp->ch_queue != NULL);

	/* Attach the atabus child; atabusprint() reports the channel. */
	chp->atabus = config_found_ia(chp->ch_atac->atac_dev, "ata", chp,
		atabusprint);
}
205 
206 /*
207  * ata_channel_detach:
208  *
209  *	Common parts of detaching an atabus to an ATA controller channel.
210  */
211 void
212 ata_channel_detach(struct ata_channel *chp)
213 {
214 	if (chp->ch_flags & ATACH_DISABLED)
215 		return;
216 
217 	ata_channel_destroy(chp);
218 }
219 
/*
 * atabusconfig:
 *
 *	Probe the drives on a channel and, if any were found, hand the
 *	rest of the configuration off to atabusconfig_thread().  Runs in
 *	the atabus worker thread, both at attach time and on rescan.
 */
static void
atabusconfig(struct atabus_softc *atabus_sc)
{
	struct ata_channel *chp = atabus_sc->sc_chan;
	struct atac_softc *atac = chp->ch_atac;
	struct atabus_initq *atabus_initq = NULL;
	int i, error;

	/* we are in the atabus's thread context */
	ata_channel_lock(chp);
	chp->ch_flags |= ATACH_TH_RUN;
	ata_channel_unlock(chp);

	/*
	 * Probe for the drives attached to controller, unless a PMP
	 * is already known
	 */
	/* XXX for SATA devices we will power up all drives at once */
	if (chp->ch_satapmp_nports == 0)
		(*atac->atac_probe)(chp);

	if (chp->ch_ndrives >= 2) {
		ATADEBUG_PRINT(("atabusattach: ch_drive_type 0x%x 0x%x\n",
		    chp->ch_drive[0].drive_type, chp->ch_drive[1].drive_type),
		    DEBUG_PROBE);
	}

	/* next operations will occur in a separate thread */
	ata_channel_lock(chp);
	chp->ch_flags &= ~ATACH_TH_RUN;
	ata_channel_unlock(chp);

	/*
	 * Make sure the devices probe in atabus order to avoid jitter.
	 * Wait until our initq entry is at the head of the global queue;
	 * atabus_attach() inserted it in attach order.
	 */
	mutex_enter(&atabus_qlock);
	for (;;) {
		atabus_initq = TAILQ_FIRST(&atabus_initq_head);
		if (atabus_initq->atabus_sc == atabus_sc)
			break;
		cv_wait(&atabus_qcv, &atabus_qlock);
	}
	mutex_exit(&atabus_qlock);

	ata_channel_lock(chp);

	/* If no drives, abort here */
	if (chp->ch_drive == NULL)
		goto out;
	KASSERT(chp->ch_ndrives == 0 || chp->ch_drive != NULL);
	for (i = 0; i < chp->ch_ndrives; i++)
		if (chp->ch_drive[i].drive_type != ATA_DRIVET_NONE)
			break;
	if (i == chp->ch_ndrives)
		goto out;

	/* Shortcut in case we've been shutdown */
	if (chp->ch_flags & ATACH_SHUTDOWN)
		goto out;

	ata_channel_unlock(chp);

	/*
	 * The config thread inherits the initq entry and the channel
	 * reference; it removes/frees them when it finishes.
	 */
	if ((error = kthread_create(PRI_NONE, 0, NULL, atabusconfig_thread,
	    atabus_sc, &atabus_cfg_lwp,
	    "%scnf", device_xname(atac->atac_dev))) != 0)
		aprint_error_dev(atac->atac_dev,
		    "unable to create config thread: error %d\n", error);
	return;

 out:
	/* Abort path: release the initq slot and pending-config count here. */
	ata_channel_unlock(chp);

	mutex_enter(&atabus_qlock);
	TAILQ_REMOVE(&atabus_initq_head, atabus_initq, atabus_initq);
	cv_broadcast(&atabus_qcv);
	mutex_exit(&atabus_qlock);

	free(atabus_initq, M_DEVBUF);

	ata_delref(chp);

	config_pending_decr(atac->atac_dev);
}
301 
/*
 * atabusconfig_thread: finish attaching the atabus's children in a
 * separate kernel thread.
 */
static void
atabusconfig_thread(void *arg)
{
	struct atabus_softc *atabus_sc = arg;
	struct ata_channel *chp = atabus_sc->sc_chan;
	struct atac_softc *atac = chp->ch_atac;
	struct atabus_initq *atabus_initq = NULL;
	int i, s;

	/* XXX seems wrong */
	/* Re-fetch our initq entry; atabusconfig() left it at the head. */
	mutex_enter(&atabus_qlock);
	atabus_initq = TAILQ_FIRST(&atabus_initq_head);
	KASSERT(atabus_initq->atabus_sc == atabus_sc);
	mutex_exit(&atabus_qlock);

	/*
	 * First look for a port multiplier
	 */
	if (chp->ch_ndrives == PMP_MAX_DRIVES &&
	    chp->ch_drive[PMP_PORT_CTL].drive_type == ATA_DRIVET_PM) {
#if NSATA_PMP > 0
		satapmp_attach(chp);
#else
		aprint_error_dev(atabus_sc->sc_dev,
		    "SATA port multiplier not supported\n");
		/* no problems going on, all drives are ATA_DRIVET_NONE */
#endif
	}

	/*
	 * Attach an ATAPI bus, if needed.
	 */
	KASSERT(chp->ch_ndrives == 0 || chp->ch_drive != NULL);
	for (i = 0; i < chp->ch_ndrives && chp->atapibus == NULL; i++) {
		if (chp->ch_drive[i].drive_type == ATA_DRIVET_ATAPI) {
#if NATAPIBUS > 0
			(*atac->atac_atapibus_attach)(atabus_sc);
#else
			/*
			 * Fake the autoconfig "not configured" message
			 */
			aprint_normal("atapibus at %s not configured\n",
			    device_xname(atac->atac_dev));
			chp->atapibus = NULL;
			s = splbio();
			/* Forget ATAPI drives since no bus will drive them. */
			for (i = 0; i < chp->ch_ndrives; i++) {
				if (chp->ch_drive[i].drive_type == ATA_DRIVET_ATAPI)
					chp->ch_drive[i].drive_type = ATA_DRIVET_NONE;
			}
			splx(s);
#endif
			break;
		}
	}

	/* Attach a wd (or compatible) child for each ATA/OLD drive. */
	for (i = 0; i < chp->ch_ndrives; i++) {
		struct ata_device adev;
		if (chp->ch_drive[i].drive_type != ATA_DRIVET_ATA &&
		    chp->ch_drive[i].drive_type != ATA_DRIVET_OLD) {
			continue;
		}
		if (chp->ch_drive[i].drv_softc != NULL)
			continue;
		memset(&adev, 0, sizeof(struct ata_device));
		adev.adev_bustype = atac->atac_bustype_ata;
		adev.adev_channel = chp->ch_channel;
		adev.adev_drv_data = &chp->ch_drive[i];
		chp->ch_drive[i].drv_softc = config_found_ia(atabus_sc->sc_dev,
		    "ata_hl", &adev, ataprint);
		if (chp->ch_drive[i].drv_softc != NULL) {
			ata_probe_caps(&chp->ch_drive[i]);
		} else {
			/* No driver attached: mark the slot empty. */
			s = splbio();
			chp->ch_drive[i].drive_type = ATA_DRIVET_NONE;
			splx(s);
		}
	}

	/* now that we know the drives, the controller can set its modes */
	if (atac->atac_set_modes) {
		(*atac->atac_set_modes)(chp);
		ata_print_modes(chp);
	}
#if NATARAID > 0
	if (atac->atac_cap & ATAC_CAP_RAID) {
		for (i = 0; i < chp->ch_ndrives; i++) {
			if (chp->ch_drive[i].drive_type == ATA_DRIVET_ATA) {
				ata_raid_check_component(
				    chp->ch_drive[i].drv_softc);
			}
		}
	}
#endif /* NATARAID > 0 */

	/*
	 * reset drive_flags for unattached devices, reset state for attached
	 * ones
	 */
	s = splbio();
	for (i = 0; i < chp->ch_ndrives; i++) {
		if (chp->ch_drive[i].drive_type == ATA_DRIVET_PM)
			continue;
		if (chp->ch_drive[i].drv_softc == NULL) {
			chp->ch_drive[i].drive_flags = 0;
			chp->ch_drive[i].drive_type = ATA_DRIVET_NONE;
		} else
			chp->ch_drive[i].state = 0;
	}
	splx(s);

	/* Let the next atabus in line run its probe. */
	mutex_enter(&atabus_qlock);
	TAILQ_REMOVE(&atabus_initq_head, atabus_initq, atabus_initq);
	cv_broadcast(&atabus_qcv);
	mutex_exit(&atabus_qlock);

	free(atabus_initq, M_DEVBUF);

	/* Drop the reference taken in atabus_attach(). */
	ata_delref(chp);

	config_pending_decr(atac->atac_dev);
	kthread_exit(0);
}
428 
429 /*
430  * atabus_thread:
431  *
432  *	Worker thread for the ATA bus.
433  */
static void
atabus_thread(void *arg)
{
	struct atabus_softc *sc = arg;
	struct ata_channel *chp = sc->sc_chan;
	struct ata_queue *chq = chp->ch_queue;
	struct ata_xfer *xfer;
	int i, rv, s;

	ata_channel_lock(chp);
	chp->ch_flags |= ATACH_TH_RUN;

	/*
	 * Probe the drives.  Reset type to indicate to controllers
	 * that can re-probe that all drives must be probed..
	 *
	 * Note: ch_ndrives may be changed during the probe.
	 */
	KASSERT(chp->ch_ndrives == 0 || chp->ch_drive != NULL);
	for (i = 0; i < chp->ch_ndrives; i++) {
		chp->ch_drive[i].drive_flags = 0;
		chp->ch_drive[i].drive_type = ATA_DRIVET_NONE;
	}
	ata_channel_unlock(chp);

	/* Initial bus configuration (probes drives, attaches children). */
	atabusconfig(sc);

	/*
	 * Main service loop: sleep until woken (reset request, rescan,
	 * frozen-queue kick, or shutdown), then act on the flag set.
	 */
	ata_channel_lock(chp);
	for (;;) {
		if ((chp->ch_flags & (ATACH_TH_RESET | ATACH_SHUTDOWN)) == 0 &&
		    (chq->queue_active == 0 || chq->queue_freeze == 0)) {
			/* Nothing to do: mark idle and wait for a wakeup. */
			chp->ch_flags &= ~ATACH_TH_RUN;
			cv_wait(&chp->ch_thr_idle, &chp->ch_lock);
			chp->ch_flags |= ATACH_TH_RUN;
		}
		if (chp->ch_flags & ATACH_SHUTDOWN) {
			break;
		}
		if (chp->ch_flags & ATACH_TH_RESCAN) {
			chp->ch_flags &= ~ATACH_TH_RESCAN;
			/* atabusconfig() takes its own locks. */
			ata_channel_unlock(chp);
			atabusconfig(sc);
			ata_channel_lock(chp);
		}
		if (chp->ch_flags & ATACH_TH_RESET) {
			/* ata_reset_channel() will unfreeze the channel */
			ata_channel_unlock(chp);
			s = splbio();
			ata_reset_channel(chp, AT_WAIT | chp->ch_reset_flags);
			splx(s);
			ata_channel_lock(chp);
		} else if (chq->queue_active > 0 && chq->queue_freeze == 1) {
			/*
			 * Caller has bumped queue_freeze, decrease it. This
			 * flow shalt never be executed for NCQ commands.
			 */
			KASSERT((chp->ch_flags & ATACH_NCQ) == 0);
			KASSERT(chq->queue_active == 1);

			ata_channel_thaw_locked(chp);
			xfer = ata_queue_get_active_xfer_locked(chp);

			KASSERT(xfer != NULL);
			KASSERT((xfer->c_flags & C_POLL) == 0);

			/* Restart the deferred xfer now that we run in thread. */
			switch ((rv = ata_xfer_start(xfer))) {
			case ATASTART_STARTED:
			case ATASTART_POLL:
			case ATASTART_ABORT:
				break;
			case ATASTART_TH:
			default:
				panic("%s: ata_xfer_start() unexpected rv %d",
				    __func__, rv);
				/* NOTREACHED */
			}
		} else if (chq->queue_freeze > 1)
			panic("%s: queue_freeze", __func__);
	}
	/* Shutdown: announce our exit to atabus_detach() and leave. */
	chp->ch_thread = NULL;
	cv_signal(&chp->ch_thr_idle);
	ata_channel_unlock(chp);
	kthread_exit(0);
}
518 
/*
 * ata_thread_wake_locked:
 *
 *	Freeze the channel queue and wake the worker thread so it can
 *	service the request.  Caller must hold the channel lock.
 */
static void
ata_thread_wake_locked(struct ata_channel *chp)
{
	KASSERT(mutex_owned(&chp->ch_lock));
	ata_channel_freeze_locked(chp);
	cv_signal(&chp->ch_thr_idle);
}
526 
527 /*
528  * atabus_match:
529  *
530  *	Autoconfiguration match routine.
531  */
532 static int
533 atabus_match(device_t parent, cfdata_t cf, void *aux)
534 {
535 	struct ata_channel *chp = aux;
536 
537 	if (chp == NULL)
538 		return (0);
539 
540 	if (cf->cf_loc[ATACF_CHANNEL] != chp->ch_channel &&
541 	    cf->cf_loc[ATACF_CHANNEL] != ATACF_CHANNEL_DEFAULT)
542 		return (0);
543 
544 	return (1);
545 }
546 
547 /*
548  * atabus_attach:
549  *
550  *	Autoconfiguration attach routine.
551  */
static void
atabus_attach(device_t parent, device_t self, void *aux)
{
	struct atabus_softc *sc = device_private(self);
	struct ata_channel *chp = aux;
	struct atabus_initq *initq;
	int error;

	sc->sc_chan = chp;

	aprint_normal("\n");
	aprint_naive("\n");

	sc->sc_dev = self;

	/* Take a channel reference; dropped by the config path. */
	if (ata_addref(chp))
		return;

	/* First atabus to attach sets up the global probe queue. */
	RUN_ONCE(&ata_init_ctrl, atabus_init);

	/*
	 * Queue ourselves for ordered probing; the worker thread waits
	 * for this entry to reach the head before configuring children.
	 */
	initq = malloc(sizeof(*initq), M_DEVBUF, M_WAITOK);
	initq->atabus_sc = sc;
	mutex_enter(&atabus_qlock);
	TAILQ_INSERT_TAIL(&atabus_initq_head, initq, atabus_initq);
	mutex_exit(&atabus_qlock);
	config_pending_incr(sc->sc_dev);

	/* Spawn the per-channel worker thread (probes and serves resets). */
	if ((error = kthread_create(PRI_NONE, 0, NULL, atabus_thread, sc,
	    &chp->ch_thread, "%s", device_xname(self))) != 0)
		aprint_error_dev(self,
		    "unable to create kernel thread: error %d\n", error);

	if (!pmf_device_register(self, atabus_suspend, atabus_resume))
		aprint_error_dev(self, "couldn't establish power handler\n");
}
587 
588 /*
589  * atabus_detach:
590  *
591  *	Autoconfiguration detach routine.
592  */
static int
atabus_detach(device_t self, int flags)
{
	struct atabus_softc *sc = device_private(self);
	struct ata_channel *chp = sc->sc_chan;
	device_t dev = NULL;
	int i, error = 0;

	/*
	 * Shutdown the channel: raise ATACH_SHUTDOWN and keep signalling
	 * until the worker thread clears ch_thread on its way out.
	 */
	ata_channel_lock(chp);
	chp->ch_flags |= ATACH_SHUTDOWN;
	while (chp->ch_thread != NULL) {
		cv_signal(&chp->ch_thr_idle);
		cv_wait(&chp->ch_thr_idle, &chp->ch_lock);
	}
	ata_channel_unlock(chp);

	/*
	 * Detach atapibus and its children.
	 */
	if ((dev = chp->atapibus) != NULL) {
		ATADEBUG_PRINT(("atabus_detach: %s: detaching %s\n",
		    device_xname(self), device_xname(dev)), DEBUG_DETACH);

		error = config_detach(dev, flags);
		if (error)
			goto out;
		/* atabus_childdetached() clears the pointer on success. */
		KASSERT(chp->atapibus == NULL);
	}

	KASSERT(chp->ch_ndrives == 0 || chp->ch_drive != NULL);

	/*
	 * Detach our other children.
	 */
	for (i = 0; i < chp->ch_ndrives; i++) {
		if (chp->ch_drive[i].drive_type == ATA_DRIVET_ATAPI)
			continue;
		/* PMP slots have no softc; just mark them empty. */
		if (chp->ch_drive[i].drive_type == ATA_DRIVET_PM)
			chp->ch_drive[i].drive_type = ATA_DRIVET_NONE;
		if ((dev = chp->ch_drive[i].drv_softc) != NULL) {
			ATADEBUG_PRINT(("%s.%d: %s: detaching %s\n", __func__,
			    __LINE__, device_xname(self), device_xname(dev)),
			    DEBUG_DETACH);
			error = config_detach(dev, flags);
			if (error)
				goto out;
			KASSERT(chp->ch_drive[i].drv_softc == NULL);
			KASSERT(chp->ch_drive[i].drive_type == 0);
		}
	}
	atabus_free_drives(chp);

 out:
#ifdef ATADEBUG
	if (dev != NULL && error != 0)
		ATADEBUG_PRINT(("%s: %s: error %d detaching %s\n", __func__,
		    device_xname(self), error, device_xname(dev)),
		    DEBUG_DETACH);
#endif /* ATADEBUG */

	return (error);
}
656 
657 void
658 atabus_childdetached(device_t self, device_t child)
659 {
660 	bool found = false;
661 	struct atabus_softc *sc = device_private(self);
662 	struct ata_channel *chp = sc->sc_chan;
663 	int i;
664 
665 	KASSERT(chp->ch_ndrives == 0 || chp->ch_drive != NULL);
666 	/*
667 	 * atapibus detached.
668 	 */
669 	if (child == chp->atapibus) {
670 		chp->atapibus = NULL;
671 		found = true;
672 		for (i = 0; i < chp->ch_ndrives; i++) {
673 			if (chp->ch_drive[i].drive_type != ATA_DRIVET_ATAPI)
674 				continue;
675 			KASSERT(chp->ch_drive[i].drv_softc != NULL);
676 			chp->ch_drive[i].drv_softc = NULL;
677 			chp->ch_drive[i].drive_flags = 0;
678 			chp->ch_drive[i].drive_type = ATA_DRIVET_NONE;
679 		}
680 	}
681 
682 	/*
683 	 * Detach our other children.
684 	 */
685 	for (i = 0; i < chp->ch_ndrives; i++) {
686 		if (chp->ch_drive[i].drive_type == ATA_DRIVET_ATAPI)
687 			continue;
688 		if (child == chp->ch_drive[i].drv_softc) {
689 			chp->ch_drive[i].drv_softc = NULL;
690 			chp->ch_drive[i].drive_flags = 0;
691 			if (chp->ch_drive[i].drive_type == ATA_DRIVET_PM)
692 				chp->ch_satapmp_nports = 0;
693 			chp->ch_drive[i].drive_type = ATA_DRIVET_NONE;
694 			found = true;
695 		}
696 	}
697 
698 	if (!found)
699 		panic("%s: unknown child %p", device_xname(self),
700 		    (const void *)child);
701 }
702 
/* Autoconfiguration glue: atabus match/attach/detach/rescan entry points. */
CFATTACH_DECL3_NEW(atabus, sizeof(struct atabus_softc),
    atabus_match, atabus_attach, atabus_detach, NULL, atabus_rescan,
    atabus_childdetached, DVF_DETACH_SHUTDOWN);
706 
707 /*****************************************************************************
708  * Common ATA bus operations.
709  *****************************************************************************/
710 
711 /* allocate/free the channel's ch_drive[] array */
712 int
713 atabus_alloc_drives(struct ata_channel *chp, int ndrives)
714 {
715 	int i;
716 	if (chp->ch_ndrives != ndrives)
717 		atabus_free_drives(chp);
718 	if (chp->ch_drive == NULL) {
719 		chp->ch_drive = malloc(
720 		    sizeof(struct ata_drive_datas) * ndrives,
721 		    M_DEVBUF, M_NOWAIT | M_ZERO);
722 	}
723 	if (chp->ch_drive == NULL) {
724 	    aprint_error_dev(chp->ch_atac->atac_dev,
725 		"can't alloc drive array\n");
726 	    chp->ch_ndrives = 0;
727 	    return ENOMEM;
728 	};
729 	for (i = 0; i < ndrives; i++) {
730 		chp->ch_drive[i].chnl_softc = chp;
731 		chp->ch_drive[i].drive = i;
732 	}
733 	chp->ch_ndrives = ndrives;
734 	return 0;
735 }
736 
void
atabus_free_drives(struct ata_channel *chp)
{
#ifdef DIAGNOSTIC
	/*
	 * Sanity: every slot must already be empty (no type, no softc);
	 * freeing an array with live children would leave dangling state.
	 */
	int i;
	int dopanic = 0;
	KASSERT(chp->ch_ndrives == 0 || chp->ch_drive != NULL);
	for (i = 0; i < chp->ch_ndrives; i++) {
		if (chp->ch_drive[i].drive_type != ATA_DRIVET_NONE) {
			printf("%s: ch_drive[%d] type %d != ATA_DRIVET_NONE\n",
			    device_xname(chp->atabus), i,
			    chp->ch_drive[i].drive_type);
			dopanic = 1;
		}
		if (chp->ch_drive[i].drv_softc != NULL) {
			printf("%s: ch_drive[%d] attached to %s\n",
			    device_xname(chp->atabus), i,
			    device_xname(chp->ch_drive[i].drv_softc));
			dopanic = 1;
		}
	}
	if (dopanic)
		panic("atabus_free_drives");
#endif

	if (chp->ch_drive == NULL)
		return;
	/* Zero the count before freeing so no reader sees a stale array. */
	chp->ch_ndrives = 0;
	free(chp->ch_drive, M_DEVBUF);
	chp->ch_drive = NULL;
}
768 
769 /* Get the disk's parameters */
int
ata_get_params(struct ata_drive_datas *drvp, uint8_t flags,
    struct ataparams *prms)
{
	struct ata_xfer *xfer;
	struct ata_channel *chp = drvp->chnl_softc;
	struct atac_softc *atac = chp->ch_atac;
	char *tb;
	int i, rv;
	uint16_t *p;

	ATADEBUG_PRINT(("%s\n", __func__), DEBUG_FUNCS);

	xfer = ata_get_xfer(chp);
	if (xfer == NULL) {
		ATADEBUG_PRINT(("%s: no xfer\n", __func__),
		    DEBUG_FUNCS|DEBUG_PROBE);
		return CMD_AGAIN;
	}

	/* Bounce buffer for the 512-byte IDENTIFY data. */
	tb = kmem_zalloc(ATA_BSIZE, KM_SLEEP);
	memset(prms, 0, sizeof(struct ataparams));

	/* Pick IDENTIFY DEVICE or IDENTIFY PACKET DEVICE per drive type. */
	if (drvp->drive_type == ATA_DRIVET_ATA) {
		xfer->c_ata_c.r_command = WDCC_IDENTIFY;
		xfer->c_ata_c.r_st_bmask = WDCS_DRDY;
		xfer->c_ata_c.r_st_pmask = WDCS_DRQ;
		xfer->c_ata_c.timeout = 3000; /* 3s */
	} else if (drvp->drive_type == ATA_DRIVET_ATAPI) {
		xfer->c_ata_c.r_command = ATAPI_IDENTIFY_DEVICE;
		xfer->c_ata_c.r_st_bmask = 0;
		xfer->c_ata_c.r_st_pmask = WDCS_DRQ;
		xfer->c_ata_c.timeout = 10000; /* 10s */
	} else {
		ATADEBUG_PRINT(("ata_get_parms: no disks\n"),
		    DEBUG_FUNCS|DEBUG_PROBE);
		rv = CMD_ERR;
		goto out;
	}
	xfer->c_ata_c.flags = AT_READ | flags;
	xfer->c_ata_c.data = tb;
	xfer->c_ata_c.bcount = ATA_BSIZE;
	if ((*atac->atac_bustype_ata->ata_exec_command)(drvp,
						xfer) != ATACMD_COMPLETE) {
		ATADEBUG_PRINT(("ata_get_parms: wdc_exec_command failed\n"),
		    DEBUG_FUNCS|DEBUG_PROBE);
		rv = CMD_AGAIN;
		goto out;
	}
	if (xfer->c_ata_c.flags & (AT_ERROR | AT_TIMEOU | AT_DF)) {
		ATADEBUG_PRINT(("ata_get_parms: ata_c.flags=0x%x\n",
		    xfer->c_ata_c.flags), DEBUG_FUNCS|DEBUG_PROBE);
		rv = CMD_ERR;
		goto out;
	}
	/* if we didn't read any data something is wrong */
	if ((xfer->c_ata_c.flags & AT_XFDONE) == 0) {
		rv = CMD_ERR;
		goto out;
	}

	/* Read in parameter block. */
	memcpy(prms, tb, sizeof(struct ataparams));

	/*
	 * Shuffle string byte order.
	 * ATAPI NEC, Mitsumi and Pioneer drives and
	 * old ATA TDK CompactFlash cards
	 * have different byte order.
	 */
#if BYTE_ORDER == BIG_ENDIAN
# define M(n)	prms->atap_model[(n) ^ 1]
#else
# define M(n)	prms->atap_model[n]
#endif
	/*
	 * If the model string already reads correctly for this host's
	 * endianness (quirky vendors listed above), skip the swap.
	 */
	if (
#if BYTE_ORDER == BIG_ENDIAN
	    !
#endif
	    ((drvp->drive_type == ATA_DRIVET_ATAPI) ?
	     ((M(0) == 'N' && M(1) == 'E') ||
	      (M(0) == 'F' && M(1) == 'X') ||
	      (M(0) == 'P' && M(1) == 'i')) :
	     ((M(0) == 'T' && M(1) == 'D' && M(2) == 'K')))) {
		rv = CMD_OK;
		goto out;
	     }
#undef M
	/* Byte-swap the model/serial/revision ID strings word by word. */
	for (i = 0; i < sizeof(prms->atap_model); i += 2) {
		p = (uint16_t *)(prms->atap_model + i);
		*p = bswap16(*p);
	}
	for (i = 0; i < sizeof(prms->atap_serial); i += 2) {
		p = (uint16_t *)(prms->atap_serial + i);
		*p = bswap16(*p);
	}
	for (i = 0; i < sizeof(prms->atap_revision); i += 2) {
		p = (uint16_t *)(prms->atap_revision + i);
		*p = bswap16(*p);
	}

	rv = CMD_OK;
 out:
	kmem_free(tb, ATA_BSIZE);
	ata_free_xfer(chp, xfer);
	return rv;
}
877 
/*
 * ata_set_mode:
 *
 *	Issue SET FEATURES / set-transfer-mode to program the drive's
 *	PIO/DMA/UDMA mode.  Returns CMD_OK on success, CMD_AGAIN when the
 *	command could not be issued, CMD_ERR on device error/timeout.
 */
int
ata_set_mode(struct ata_drive_datas *drvp, uint8_t mode, uint8_t flags)
{
	struct ata_xfer *xfer;
	int rv;
	struct ata_channel *chp = drvp->chnl_softc;
	struct atac_softc *atac = chp->ch_atac;

	ATADEBUG_PRINT(("ata_set_mode=0x%x\n", mode), DEBUG_FUNCS);

	xfer = ata_get_xfer(chp);
	if (xfer == NULL) {
		ATADEBUG_PRINT(("%s: no xfer\n", __func__),
		    DEBUG_FUNCS|DEBUG_PROBE);
		return CMD_AGAIN;
	}

	/* SET FEATURES subcommand: mode value goes in the count register. */
	xfer->c_ata_c.r_command = SET_FEATURES;
	xfer->c_ata_c.r_st_bmask = 0;
	xfer->c_ata_c.r_st_pmask = 0;
	xfer->c_ata_c.r_features = WDSF_SET_MODE;
	xfer->c_ata_c.r_count = mode;
	xfer->c_ata_c.flags = flags;
	xfer->c_ata_c.timeout = 1000; /* 1s */
	if ((*atac->atac_bustype_ata->ata_exec_command)(drvp,
						xfer) != ATACMD_COMPLETE) {
		rv = CMD_AGAIN;
		goto out;
	}
	if (xfer->c_ata_c.flags & (AT_ERROR | AT_TIMEOU | AT_DF)) {
		rv = CMD_ERR;
		goto out;
	}

	rv = CMD_OK;

out:
	ata_free_xfer(chp, xfer);
	return rv;
}
918 
/*
 * ata_read_log_ext_ncq:
 *
 *	Issue READ LOG EXT for the NCQ error log page to learn which
 *	queued command failed.  On success returns 0 and fills *slot,
 *	*status and *err from the log page.  Returns EOPNOTSUPP for
 *	non-NCQ drives or a non-queued error, EAGAIN/EINVAL on failure.
 *	Runs as a recovery command so it bypasses the frozen queue.
 */
int
ata_read_log_ext_ncq(struct ata_drive_datas *drvp, uint8_t flags,
    uint8_t *slot, uint8_t *status, uint8_t *err)
{
	struct ata_xfer *xfer;
	int rv;
	struct ata_channel *chp = drvp->chnl_softc;
	struct atac_softc *atac = chp->ch_atac;
	uint8_t *tb, cksum, page;

	ATADEBUG_PRINT(("%s\n", __func__), DEBUG_FUNCS);

	/* Only NCQ ATA drives support/need this */
	if (drvp->drive_type != ATA_DRIVET_ATA ||
	    (drvp->drive_flags & ATA_DRIVE_NCQ) == 0)
		return EOPNOTSUPP;

	xfer = ata_get_xfer_ext(chp, C_RECOVERY, 0);

	/* Use the per-drive recovery buffer reserved for this purpose. */
	tb = drvp->recovery_blk;
	memset(tb, 0, sizeof(drvp->recovery_blk));

	/*
	 * We could use READ LOG DMA EXT if drive supports it (i.e.
	 * when it supports Streaming feature) to avoid PIO command,
	 * and to make this a little faster. Realistically, it
	 * should not matter.
	 */
	xfer->c_flags |= C_RECOVERY;
	xfer->c_ata_c.r_command = WDCC_READ_LOG_EXT;
	xfer->c_ata_c.r_lba = page = WDCC_LOG_PAGE_NCQ;
	xfer->c_ata_c.r_st_bmask = WDCS_DRDY;
	xfer->c_ata_c.r_st_pmask = WDCS_DRDY;
	xfer->c_ata_c.r_count = 1;
	xfer->c_ata_c.r_device = WDSD_LBA;
	xfer->c_ata_c.flags = AT_READ | AT_LBA | AT_LBA48 | flags;
	xfer->c_ata_c.timeout = 1000; /* 1s */
	xfer->c_ata_c.data = tb;
	xfer->c_ata_c.bcount = sizeof(drvp->recovery_blk);

	if ((*atac->atac_bustype_ata->ata_exec_command)(drvp,
						xfer) != ATACMD_COMPLETE) {
		rv = EAGAIN;
		goto out;
	}
	if (xfer->c_ata_c.flags & (AT_ERROR | AT_TIMEOU | AT_DF)) {
		rv = EINVAL;
		goto out;
	}

	/* The log page checksums to zero when valid. */
	cksum = 0;
	for (int i = 0; i < sizeof(drvp->recovery_blk); i++)
		cksum += tb[i];
	if (cksum != 0) {
		aprint_error_dev(drvp->drv_softc,
		    "invalid checksum %x for READ LOG EXT page %x\n",
		    cksum, page);
		rv = EINVAL;
		goto out;
	}

	if (tb[0] & WDCC_LOG_NQ) {
		/* not queued command */
		rv = EOPNOTSUPP;
		goto out;
	}

	/* Byte 0 low bits: failed tag; bytes 2/3: status and error regs. */
	*slot = tb[0] & 0x1f;
	*status = tb[2];
	*err = tb[3];

	KASSERTMSG((*status & WDCS_ERR),
	    "%s: non-error command slot %d reported by READ LOG EXT page %x: "
	    "err %x status %x\n",
	    device_xname(drvp->drv_softc), *slot, page, *err, *status);

	rv = 0;

out:
	ata_free_xfer(chp, xfer);
	return rv;
}
1001 
1002 #if NATA_DMA
1003 void
1004 ata_dmaerr(struct ata_drive_datas *drvp, int flags)
1005 {
1006 	/*
1007 	 * Downgrade decision: if we get NERRS_MAX in NXFER.
1008 	 * We start with n_dmaerrs set to NERRS_MAX-1 so that the
1009 	 * first error within the first NXFER ops will immediatly trigger
1010 	 * a downgrade.
1011 	 * If we got an error and n_xfers is bigger than NXFER reset counters.
1012 	 */
1013 	drvp->n_dmaerrs++;
1014 	if (drvp->n_dmaerrs >= NERRS_MAX && drvp->n_xfers <= NXFER) {
1015 		ata_downgrade_mode(drvp, flags);
1016 		drvp->n_dmaerrs = NERRS_MAX-1;
1017 		drvp->n_xfers = 0;
1018 		return;
1019 	}
1020 	if (drvp->n_xfers > NXFER) {
1021 		drvp->n_dmaerrs = 1; /* just got an error */
1022 		drvp->n_xfers = 1; /* restart counting from this error */
1023 	}
1024 }
1025 #endif	/* NATA_DMA */
1026 
1027 /*
1028  * freeze the queue and wait for the controller to be idle. Caller has to
1029  * unfreeze/restart the queue
1030  */
static void
ata_channel_idle(struct ata_channel *chp)
{
	ata_channel_lock(chp);
	ata_channel_freeze_locked(chp);
	/*
	 * Wait for in-flight commands to drain.  QF_IDLE_WAIT asks
	 * atastart() to signal queue_idle; the 1-tick timeout guards
	 * against a lost wakeup, re-checking queue_active each pass.
	 */
	while (chp->ch_queue->queue_active > 0) {
		chp->ch_queue->queue_flags |= QF_IDLE_WAIT;
		cv_timedwait(&chp->ch_queue->queue_idle, &chp->ch_lock, 1);
	}
	ata_channel_unlock(chp);
}
1042 
1043 /*
1044  * Add a command to the queue and start controller.
1045  *
1046  * MUST BE CALLED AT splbio()!
1047  */
void
ata_exec_xfer(struct ata_channel *chp, struct ata_xfer *xfer)
{

	ATADEBUG_PRINT(("ata_exec_xfer %p channel %d drive %d\n", xfer,
	    chp->ch_channel, xfer->c_drive), DEBUG_XFERS);

	/* complete xfer setup */
	xfer->c_chp = chp;

	ata_channel_lock(chp);

	/*
	 * Standard commands are added to the end of command list, but
	 * recovery commands must be run immediately.
	 */
	if ((xfer->c_flags & C_RECOVERY) == 0)
		TAILQ_INSERT_TAIL(&chp->ch_queue->queue_xfer, xfer,
		    c_xferchain);
	else
		TAILQ_INSERT_HEAD(&chp->ch_queue->queue_xfer, xfer,
		    c_xferchain);

	/*
	 * if polling and can sleep, wait for the xfer to be at head of queue
	 */
	if ((xfer->c_flags & (C_POLL | C_WAIT)) ==  (C_POLL | C_WAIT)) {
		while (chp->ch_queue->queue_active > 0 ||
		    TAILQ_FIRST(&chp->ch_queue->queue_xfer) != xfer) {
			/* C_WAITACT tells completion paths to wake us. */
			xfer->c_flags |= C_WAITACT;
			cv_wait(&xfer->c_active, &chp->ch_lock);
			xfer->c_flags &= ~C_WAITACT;

			/*
			 * Free xfer now if there was an attempt to free it
			 * while we were waiting.
			 */
			if ((xfer->c_flags & (C_FREE|C_WAITTIMO)) == C_FREE) {
				ata_channel_unlock(chp);

				ata_free_xfer(chp, xfer);
				return;
			}
		}
	}

	ata_channel_unlock(chp);

	ATADEBUG_PRINT(("atastart from ata_exec_xfer, flags 0x%x\n",
	    chp->ch_flags), DEBUG_XFERS);
	atastart(chp);
}
1100 
1101 /*
1102  * Start I/O on a controller, for the given channel.
1103  * The first xfer may be not for our channel if the channel queues
1104  * are shared.
1105  *
1106  * MUST BE CALLED AT splbio()!
1107  */
void
atastart(struct ata_channel *chp)
{
	struct atac_softc *atac = chp->ch_atac;
	struct ata_queue *chq = chp->ch_queue;
	struct ata_xfer *xfer, *axfer;
	bool recovery;

#ifdef ATA_DEBUG
	int spl1, spl2;

	/* Verify the caller is at splbio(): raising it twice must be a no-op */
	spl1 = splbio();
	spl2 = splbio();
	if (spl2 != spl1) {
		printf("atastart: not at splbio()\n");
		panic("atastart");
	}
	splx(spl2);
	splx(spl1);
#endif /* ATA_DEBUG */

	ata_channel_lock(chp);

again:
	/* all command slots busy? then nothing more can be issued */
	KASSERT(chq->queue_active <= chq->queue_openings);
	if (chq->queue_active == chq->queue_openings) {
		ATADEBUG_PRINT(("%s(chp=%p): channel %d completely busy\n",
		    __func__, chp, chp->ch_channel), DEBUG_XFERS);
		goto out;
	}

	/* is there a xfer ? */
	if ((xfer = TAILQ_FIRST(&chp->ch_queue->queue_xfer)) == NULL) {
		ATADEBUG_PRINT(("%s(chp=%p): channel %d queue_xfer is empty\n",
		    __func__, chp, chp->ch_channel), DEBUG_XFERS);
		goto out;
	}

	recovery = ISSET(xfer->c_flags, C_RECOVERY);

	/* is the queue frozen? (recovery commands bypass the freeze) */
	if (__predict_false(!recovery && chq->queue_freeze > 0)) {
		/* wake ata_channel_idle() if it waits for the queue to drain */
		if (chq->queue_flags & QF_IDLE_WAIT) {
			chq->queue_flags &= ~QF_IDLE_WAIT;
			cv_signal(&chp->ch_queue->queue_idle);
		}
		ATADEBUG_PRINT(("%s(chp=%p): channel %d drive %d "
		    "queue frozen: %d (recovery: %d)\n",
		    __func__, chp, chp->ch_channel, xfer->c_drive,
		    chq->queue_freeze, recovery),
		    DEBUG_XFERS);
		goto out;
	}

	/* all xfers on same queue must belong to the same channel */
	KASSERT(xfer->c_chp == chp);

	/*
	 * Can only take the command if there are no current active
	 * commands, or if the command is NCQ and the active commands are also
	 * NCQ. If PM is in use and HBA driver doesn't support/use FIS-based
	 * switching, can only send commands to single drive.
	 * Need only check first xfer.
	 * XXX FIS-based switching - revisit
	 */
	if (!recovery && (axfer = TAILQ_FIRST(&chp->ch_queue->active_xfers))) {
		if (!ISSET(xfer->c_flags, C_NCQ) ||
		    !ISSET(axfer->c_flags, C_NCQ) ||
		    xfer->c_drive != axfer->c_drive)
			goto out;
	}

	struct ata_drive_datas * const drvp = &chp->ch_drive[xfer->c_drive];

	/*
	 * if someone is waiting for the command to be active, wake it up
	 * and let it process the command
	 */
	if (xfer->c_flags & C_WAITACT) {
		ATADEBUG_PRINT(("atastart: xfer %p channel %d drive %d "
		    "wait active\n", xfer, chp->ch_channel, xfer->c_drive),
		    DEBUG_XFERS);
		/* wake the ata_exec_xfer() poll-waiter; it will re-enter */
		cv_signal(&xfer->c_active);
		goto out;
	}

	/* let the HBA claim shared hardware (if it needs to) before starting */
	if (atac->atac_claim_hw)
		if (!atac->atac_claim_hw(chp, 0))
			goto out;

	ATADEBUG_PRINT(("%s(chp=%p): xfer %p channel %d drive %d\n",
	    __func__, chp, xfer, chp->ch_channel, xfer->c_drive), DEBUG_XFERS);
	/* a scheduled reset clears the drive state so it gets re-initialized */
	if (drvp->drive_flags & ATA_DRIVE_RESET) {
		drvp->drive_flags &= ~ATA_DRIVE_RESET;
		drvp->state = 0;
	}

	if (ISSET(xfer->c_flags, C_NCQ))
		SET(chp->ch_flags, ATACH_NCQ);
	else
		CLR(chp->ch_flags, ATACH_NCQ);

	/* move xfer from pending queue to the active list */
	ata_activate_xfer_locked(chp, xfer);

	if (atac->atac_cap & ATAC_CAP_NOIRQ)
		KASSERT(xfer->c_flags & C_POLL);

	switch (ata_xfer_start(xfer)) {
	case ATASTART_TH:
	case ATASTART_ABORT:
		/* don't start any further commands in this case */
		goto out;
	default:
		/* nothing to do */
		break;
	}

	/* Queue more commands if possible, but not during recovery */
	if (!recovery && chq->queue_active < chq->queue_openings)
		goto again;

out:
	ata_channel_unlock(chp);
}
1232 
/*
 * Dispatch an xfer via its c_start hook and act on the hook's verdict.
 * Called and returns with the channel lock held; the POLL and ABORT
 * cases temporarily drop the lock while running the respective callback.
 * Returns the ATASTART_* code from c_start for the caller to act on.
 */
int
ata_xfer_start(struct ata_xfer *xfer)
{
	struct ata_channel *chp = xfer->c_chp;
	int rv;

	KASSERT(mutex_owned(&chp->ch_lock));

	rv = xfer->c_start(chp, xfer);
	switch (rv) {
	case ATASTART_STARTED:
		/* nothing to do */
		break;
	case ATASTART_TH:
		/* postpone xfer to thread */
		ata_thread_wake_locked(chp);
		break;
	case ATASTART_POLL:
		/* can happen even in thread context for some ATAPI devices */
		ata_channel_unlock(chp);
		KASSERT(xfer->c_poll != NULL);
		xfer->c_poll(chp, xfer);
		ata_channel_lock(chp);
		break;
	case ATASTART_ABORT:
		ata_channel_unlock(chp);
		KASSERT(xfer->c_abort != NULL);
		xfer->c_abort(chp, xfer);
		ata_channel_lock(chp);
		break;
	}

	return rv;
}
1267 
1268 static void
1269 ata_activate_xfer_locked(struct ata_channel *chp, struct ata_xfer *xfer)
1270 {
1271 	struct ata_queue * const chq = chp->ch_queue;
1272 
1273 	KASSERT(mutex_owned(&chp->ch_lock));
1274 
1275 	KASSERT(chq->queue_active < chq->queue_openings);
1276 	KASSERT((chq->active_xfers_used & __BIT(xfer->c_slot)) == 0);
1277 
1278 	TAILQ_REMOVE(&chq->queue_xfer, xfer, c_xferchain);
1279 	if ((xfer->c_flags & C_RECOVERY) == 0)
1280 		TAILQ_INSERT_TAIL(&chq->active_xfers, xfer, c_activechain);
1281 	else {
1282 		/*
1283 		 * Must go to head, so that ata_queue_get_active_xfer()
1284 		 * returns the recovery command, and not some other
1285 		 * random active transfer.
1286 		 */
1287 		TAILQ_INSERT_HEAD(&chq->active_xfers, xfer, c_activechain);
1288 	}
1289 	chq->active_xfers_used |= __BIT(xfer->c_slot);
1290 	chq->queue_active++;
1291 }
1292 
/*
 * Remove a finished xfer from the active list and release its slot.
 * Takes the channel lock itself. Also stops the per-xfer timeout
 * callout; if the callout handler is already running, C_WAITTIMO is
 * set so ata_timo_xfer_check() can detect the race later.
 */
void
ata_deactivate_xfer(struct ata_channel *chp, struct ata_xfer *xfer)
{
	struct ata_queue * const chq = chp->ch_queue;

	ata_channel_lock(chp);

	KASSERT(chq->queue_active > 0);
	KASSERT((chq->active_xfers_used & __BIT(xfer->c_slot)) != 0);

	callout_stop(&xfer->c_timo_callout);

	/* callout already invoking? flag it so the timeout path backs off */
	if (callout_invoking(&xfer->c_timo_callout))
		xfer->c_flags |= C_WAITTIMO;

	TAILQ_REMOVE(&chq->active_xfers, xfer, c_activechain);
	chq->active_xfers_used &= ~__BIT(xfer->c_slot);
	chq->queue_active--;

	ata_channel_unlock(chp);
}
1314 
1315 /*
1316  * Called in c_intr hook. Must be called before before any deactivations
1317  * are done - if there is drain pending, it calls c_kill_xfer hook which
1318  * deactivates the xfer.
1319  * Calls c_kill_xfer with channel lock free.
1320  * Returns true if caller should just exit without further processing.
1321  * Caller must not further access any part of xfer or any related controller
1322  * structures in that case, it should just return.
1323  */
bool
ata_waitdrain_xfer_check(struct ata_channel *chp, struct ata_xfer *xfer)
{
	int drive = xfer->c_drive;
	bool draining = false;

	ata_channel_lock(chp);

	if (chp->ch_drive[drive].drive_flags & ATA_DRIVE_WAITDRAIN) {
		/* the kill hook must run without the channel lock held */
		ata_channel_unlock(chp);

		(*xfer->c_kill_xfer)(chp, xfer, KILL_GONE);

		/* wake up ata_kill_pending() waiting for the drain */
		ata_channel_lock(chp);
		chp->ch_drive[drive].drive_flags &= ~ATA_DRIVE_WAITDRAIN;
		cv_signal(&chp->ch_queue->queue_drain);
		draining = true;
	}

	ata_channel_unlock(chp);

	return draining;
}
1347 
1348 /*
1349  * Check for race of normal transfer handling vs. timeout.
1350  */
bool
ata_timo_xfer_check(struct ata_xfer *xfer)
{
	struct ata_channel *chp = xfer->c_chp;
	struct ata_drive_datas *drvp = &chp->ch_drive[xfer->c_drive];

	ata_channel_lock(chp);

	/* acknowledge callout invocation (pairs with callout_invoking()) */
	callout_ack(&xfer->c_timo_callout);

	/* C_WAITTIMO set => xfer was deactivated while this callout fired */
	if (xfer->c_flags & C_WAITTIMO) {
		xfer->c_flags &= ~C_WAITTIMO;

		/* Handle race vs. ata_free_xfer() */
		if (xfer->c_flags & C_FREE) {
			xfer->c_flags &= ~C_FREE;
			ata_channel_unlock(chp);

	    		aprint_normal_dev(drvp->drv_softc,
			    "xfer %d freed while invoking timeout\n",
			    xfer->c_slot);

			/* finish the free deferred by ata_free_xfer() */
			ata_free_xfer(chp, xfer);
			return true;
		}

		/* Race vs. callout_stop() in ata_deactivate_xfer() */
		ata_channel_unlock(chp);

	    	aprint_normal_dev(drvp->drv_softc,
		    "xfer %d deactivated while invoking timeout\n",
		    xfer->c_slot);
		return true;
	}

	ata_channel_unlock(chp);

	/* No race, proceed with timeout handling */
	return false;
}
1391 
1392 void
1393 ata_timeout(void *v)
1394 {
1395 	struct ata_xfer *xfer = v;
1396 	int s;
1397 
1398 	ATADEBUG_PRINT(("%s: slot %d\n", __func__, xfer->c_slot),
1399 	    DEBUG_FUNCS|DEBUG_XFERS);
1400 
1401 	s = splbio();				/* XXX MPSAFE */
1402 
1403 	if (ata_timo_xfer_check(xfer)) {
1404 		/* Already logged */
1405 		goto out;
1406 	}
1407 
1408 	/* Mark as timed out. Do not print anything, wd(4) will. */
1409 	xfer->c_flags |= C_TIMEOU;
1410 	xfer->c_intr(xfer->c_chp, xfer, 0);
1411 
1412 out:
1413 	splx(s);
1414 }
1415 
1416 /*
1417  * Kill off all active xfers for a ata_channel.
1418  *
1419  * Must be called with channel lock held.
1420  */
1421 void
1422 ata_kill_active(struct ata_channel *chp, int reason, int flags)
1423 {
1424 	struct ata_queue * const chq = chp->ch_queue;
1425 	struct ata_xfer *xfer, *xfernext;
1426 
1427 	KASSERT(mutex_owned(&chp->ch_lock));
1428 
1429 	TAILQ_FOREACH_SAFE(xfer, &chq->active_xfers, c_activechain, xfernext) {
1430 		(*xfer->c_kill_xfer)(xfer->c_chp, xfer, reason);
1431 	}
1432 
1433 	if (flags & AT_RST_EMERG)
1434 		ata_queue_reset(chq);
1435 }
1436 
1437 /*
1438  * Kill off all pending xfers for a drive.
1439  */
void
ata_kill_pending(struct ata_drive_datas *drvp)
{
	struct ata_channel * const chp = drvp->chnl_softc;
	struct ata_queue * const chq = chp->ch_queue;
	struct ata_xfer *xfer, *xfernext;

	ata_channel_lock(chp);

	/* Kill all pending transfers */
	TAILQ_FOREACH_SAFE(xfer, &chq->queue_xfer, c_xferchain, xfernext) {
		KASSERT(xfer->c_chp == chp);

		/* only transfers for this drive are killed */
		if (xfer->c_drive != drvp->drive)
			continue;

		TAILQ_REMOVE(&chp->ch_queue->queue_xfer, xfer, c_xferchain);

		/*
		 * Keep the lock, so that we get deadlock (and 'locking against
		 * myself' with LOCKDEBUG), instead of silent
		 * data corruption, if the hook tries to call back into
		 * middle layer for inactive xfer.
		 */
		(*xfer->c_kill_xfer)(chp, xfer, KILL_GONE_INACTIVE);
	}

	/* Wait until all active transfers on the drive finish */
	while (chq->queue_active > 0) {
		bool drv_active = false;

		TAILQ_FOREACH(xfer, &chq->active_xfers, c_activechain) {
			KASSERT(xfer->c_chp == chp);

			if (xfer->c_drive == drvp->drive) {
				drv_active = true;
				break;
			}
		}

		if (!drv_active) {
			/* all finished */
			break;
		}

		/* ata_waitdrain_xfer_check() signals queue_drain for us */
		drvp->drive_flags |= ATA_DRIVE_WAITDRAIN;
		cv_wait(&chq->queue_drain, &chp->ch_lock);
	}

	ata_channel_unlock(chp);
}
1491 
1492 static void
1493 ata_channel_freeze_locked(struct ata_channel *chp)
1494 {
1495 	chp->ch_queue->queue_freeze++;
1496 
1497 	ATADEBUG_PRINT(("%s(chp=%p) -> %d\n", __func__, chp,
1498 	    chp->ch_queue->queue_freeze), DEBUG_FUNCS | DEBUG_XFERS);
1499 }
1500 
/*
 * Freeze the channel queue (public wrapper; takes the channel lock).
 */
void
ata_channel_freeze(struct ata_channel *chp)
{
	ata_channel_lock(chp);
	ata_channel_freeze_locked(chp);
	ata_channel_unlock(chp);
}
1508 
/*
 * Drop the queue freeze count with the channel lock already held.
 * Must pair with a previous freeze (asserted below).
 */
static void
ata_channel_thaw_locked(struct ata_channel *chp)
{
	KASSERT(mutex_owned(&chp->ch_lock));
	KASSERT(chp->ch_queue->queue_freeze > 0);

	chp->ch_queue->queue_freeze--;

	ATADEBUG_PRINT(("%s(chp=%p) -> %d\n", __func__, chp,
	    chp->ch_queue->queue_freeze), DEBUG_FUNCS | DEBUG_XFERS);
}
1520 
/*
 * Thaw the channel queue (public wrapper; takes the channel lock).
 */
void
ata_channel_thaw(struct ata_channel *chp)
{
	ata_channel_lock(chp);
	ata_channel_thaw_locked(chp);
	ata_channel_unlock(chp);
}
1528 
1529 /*
1530  * ata_reset_channel:
1531  *
1532  *	Reset and ATA channel.
1533  *
1534  *	MUST BE CALLED AT splbio()!
1535  */
void
ata_reset_channel(struct ata_channel *chp, int flags)
{
	struct atac_softc *atac = chp->ch_atac;
	int drive;
	bool threset = false;

#ifdef ATA_DEBUG
	int spl1, spl2;

	/* Verify the caller is at splbio(): raising it twice must be a no-op */
	spl1 = splbio();
	spl2 = splbio();
	if (spl2 != spl1) {
		printf("ata_reset_channel: not at splbio()\n");
		panic("ata_reset_channel");
	}
	splx(spl2);
	splx(spl1);
#endif /* ATA_DEBUG */

	ata_channel_lock(chp);

	/*
	 * If we can poll or wait it's OK, otherwise wake up the
	 * kernel thread to do it for us.
	 */
	ATADEBUG_PRINT(("ata_reset_channel flags 0x%x ch_flags 0x%x\n",
	    flags, chp->ch_flags), DEBUG_FUNCS | DEBUG_XFERS);
	if ((flags & (AT_POLL | AT_WAIT)) == 0) {
		if (chp->ch_flags & ATACH_TH_RESET) {
			/* No need to schedule a reset more than one time. */
			ata_channel_unlock(chp);
			return;
		}

		/*
		 * Block execution of other commands while reset is scheduled
		 * to a thread.
		 */
		ata_channel_freeze_locked(chp);
		chp->ch_flags |= ATACH_TH_RESET;
		chp->ch_reset_flags = flags & AT_RST_EMERG;
		cv_signal(&chp->ch_thr_idle);
		ata_channel_unlock(chp);
		return;
	}

	/* Block execution of other commands during reset */
	ata_channel_freeze_locked(chp);

	/*
	 * If reset has been scheduled to a thread, then clear
	 * the flag now so that the thread won't try to execute it if
	 * we happen to sleep, and thaw one more time after the reset.
	 */
	if (chp->ch_flags & ATACH_TH_RESET) {
		chp->ch_flags &= ~ATACH_TH_RESET;
		threset = true;
	}

	/* the HBA reset hook may sleep; don't hold the lock across it */
	ata_channel_unlock(chp);

	(*atac->atac_bustype_ata->ata_reset_channel)(chp, flags);

	/* force re-initialization of all drives on next command */
	ata_channel_lock(chp);
	KASSERT(chp->ch_ndrives == 0 || chp->ch_drive != NULL);
	for (drive = 0; drive < chp->ch_ndrives; drive++)
		chp->ch_drive[drive].state = 0;

	/*
	 * Thaw one extra time to clear the freeze done when the reset has
	 * been scheduled to the thread.
	 */
	if (threset)
		ata_channel_thaw_locked(chp);

	/* Allow commands to run again */
	ata_channel_thaw_locked(chp);

	/* Signal the thread in case there is an xfer to run */
	cv_signal(&chp->ch_thr_idle);

	ata_channel_unlock(chp);

	if (flags & AT_RST_EMERG) {
		/* make sure that we can use polled commands */
		ata_queue_reset(chp->ch_queue);
	} else {
		atastart(chp);
	}
}
1627 
1628 int
1629 ata_addref(struct ata_channel *chp)
1630 {
1631 	struct atac_softc *atac = chp->ch_atac;
1632 	struct scsipi_adapter *adapt = &atac->atac_atapi_adapter._generic;
1633 	int s, error = 0;
1634 
1635 	s = splbio();
1636 	if (adapt->adapt_refcnt++ == 0 &&
1637 	    adapt->adapt_enable != NULL) {
1638 		error = (*adapt->adapt_enable)(atac->atac_dev, 1);
1639 		if (error)
1640 			adapt->adapt_refcnt--;
1641 	}
1642 	splx(s);
1643 	return (error);
1644 }
1645 
1646 void
1647 ata_delref(struct ata_channel *chp)
1648 {
1649 	struct atac_softc *atac = chp->ch_atac;
1650 	struct scsipi_adapter *adapt = &atac->atac_atapi_adapter._generic;
1651 	int s;
1652 
1653 	s = splbio();
1654 	if (adapt->adapt_refcnt-- == 1 &&
1655 	    adapt->adapt_enable != NULL)
1656 		(void) (*adapt->adapt_enable)(atac->atac_dev, 0);
1657 	splx(s);
1658 }
1659 
/*
 * Print the negotiated transfer modes (PIO/DMA/UDMA/NCQ/FUA) of every
 * configured drive on the channel, for the boot/verbose console output.
 */
void
ata_print_modes(struct ata_channel *chp)
{
	struct atac_softc *atac = chp->ch_atac;
	int drive;
	struct ata_drive_datas *drvp;

	KASSERT(chp->ch_ndrives == 0 || chp->ch_drive != NULL);
	for (drive = 0; drive < chp->ch_ndrives; drive++) {
		drvp = &chp->ch_drive[drive];
		/* skip empty slots and drives without an attached child */
		if (drvp->drive_type == ATA_DRIVET_NONE ||
		    drvp->drv_softc == NULL)
			continue;
		aprint_verbose("%s(%s:%d:%d): using PIO mode %d",
			device_xname(drvp->drv_softc),
			device_xname(atac->atac_dev),
			chp->ch_channel, drvp->drive, drvp->PIO_mode);
#if NATA_DMA
		if (drvp->drive_flags & ATA_DRIVE_DMA)
			aprint_verbose(", DMA mode %d", drvp->DMA_mode);
#if NATA_UDMA
		if (drvp->drive_flags & ATA_DRIVE_UDMA) {
			aprint_verbose(", Ultra-DMA mode %d", drvp->UDMA_mode);
			if (drvp->UDMA_mode == 2)
				aprint_verbose(" (Ultra/33)");
			else if (drvp->UDMA_mode == 4)
				aprint_verbose(" (Ultra/66)");
			else if (drvp->UDMA_mode == 5)
				aprint_verbose(" (Ultra/100)");
			else if (drvp->UDMA_mode == 6)
				aprint_verbose(" (Ultra/133)");
		}
#endif	/* NATA_UDMA */
#endif	/* NATA_DMA */
#if NATA_DMA || NATA_PIOBM
		if (0
#if NATA_DMA
		    || (drvp->drive_flags & (ATA_DRIVE_DMA | ATA_DRIVE_UDMA))
#endif
#if NATA_PIOBM
		    /* PIOBM capable controllers use DMA for PIO commands */
		    || (atac->atac_cap & ATAC_CAP_PIOBM)
#endif
		    )
			aprint_verbose(" (using DMA)");

		if (drvp->drive_flags & ATA_DRIVE_NCQ) {
			aprint_verbose(", NCQ (%d tags)%s",
			    ATA_REAL_OPENINGS(chp->ch_queue->queue_openings),
			    (drvp->drive_flags & ATA_DRIVE_NCQ_PRIO)
			    ? " w/PRIO" : "");
		} else if (drvp->drive_flags & ATA_DRIVE_WFUA)
			aprint_verbose(", WRITE DMA FUA EXT");

#endif	/* NATA_DMA || NATA_PIOBM */
		aprint_verbose("\n");
	}
}
1718 
1719 #if NATA_DMA
1720 /*
1721  * downgrade the transfer mode of a drive after an error. return 1 if
1722  * downgrade was possible, 0 otherwise.
1723  *
1724  * MUST BE CALLED AT splbio()!
1725  */
int
ata_downgrade_mode(struct ata_drive_datas *drvp, int flags)
{
	struct ata_channel *chp = drvp->chnl_softc;
	struct atac_softc *atac = chp->ch_atac;
	device_t drv_dev = drvp->drv_softc;
	int cf_flags = device_cfdata(drv_dev)->cf_flags;

	/* if drive or controller don't know its mode, we can't do much */
	if ((drvp->drive_flags & ATA_DRIVE_MODE) == 0 ||
	    (atac->atac_set_modes == NULL))
		return 0;
	/* current drive mode was set by a config flag, leave it this way */
	if ((cf_flags & ATA_CONFIG_PIO_SET) ||
	    (cf_flags & ATA_CONFIG_DMA_SET) ||
	    (cf_flags & ATA_CONFIG_UDMA_SET))
		return 0;

#if NATA_UDMA
	/*
	 * If we were using Ultra-DMA mode, downgrade to the next lower mode.
	 */
	if ((drvp->drive_flags & ATA_DRIVE_UDMA) && drvp->UDMA_mode >= 2) {
		drvp->UDMA_mode--;
		aprint_error_dev(drv_dev,
		    "transfer error, downgrading to Ultra-DMA mode %d\n",
		    drvp->UDMA_mode);
	}
#endif

	/*
	 * If we were using ultra-DMA, don't downgrade to multiword DMA.
	 * NOTE(review): the else below pairs with the if inside the
	 * NATA_UDMA block above; this presumably won't compile with
	 * NATA_DMA set and NATA_UDMA unset - confirm the build options
	 * guarantee that combination cannot occur.
	 */
	else if (drvp->drive_flags & (ATA_DRIVE_DMA | ATA_DRIVE_UDMA)) {
		drvp->drive_flags &= ~(ATA_DRIVE_DMA | ATA_DRIVE_UDMA);
		drvp->PIO_mode = drvp->PIO_cap;
		aprint_error_dev(drv_dev,
		    "transfer error, downgrading to PIO mode %d\n",
		    drvp->PIO_mode);
	} else /* already using PIO, can't downgrade */
		return 0;

	(*atac->atac_set_modes)(chp);
	ata_print_modes(chp);
	/* reset the channel, which will schedule all drives for setup */
	ata_reset_channel(chp, flags);
	return 1;
}
1774 #endif	/* NATA_DMA */
1775 
1776 /*
1777  * Probe drive's capabilities, for use by the controller later
1778  * Assumes drvp points to an existing drive.
1779  */
void
ata_probe_caps(struct ata_drive_datas *drvp)
{
	struct ataparams params, params2;
	struct ata_channel *chp = drvp->chnl_softc;
	struct atac_softc *atac = chp->ch_atac;
	device_t drv_dev = drvp->drv_softc;
	int i, printed = 0;
	const char *sep = "";
	int cf_flags;

	if (ata_get_params(drvp, AT_WAIT, &params) != CMD_OK) {
		/* IDENTIFY failed. Can't tell more about the device */
		return;
	}
	if ((atac->atac_cap & (ATAC_CAP_DATA16 | ATAC_CAP_DATA32)) ==
	    (ATAC_CAP_DATA16 | ATAC_CAP_DATA32)) {
		/*
		 * Controller claims 16 and 32 bit transfers.
		 * Re-do an IDENTIFY with 32-bit transfers,
		 * and compare results.
		 */
		ata_channel_lock(chp);
		drvp->drive_flags |= ATA_DRIVE_CAP32;
		ata_channel_unlock(chp);
		ata_get_params(drvp, AT_WAIT, &params2);
		if (memcmp(&params, &params2, sizeof(struct ataparams)) != 0) {
			/* Not good. fall back to 16bits */
			ata_channel_lock(chp);
			drvp->drive_flags &= ~ATA_DRIVE_CAP32;
			ata_channel_unlock(chp);
		} else {
			aprint_verbose_dev(drv_dev, "32-bit data port\n");
		}
	}
#if 0 /* Some ultra-DMA drives claims to only support ATA-3. sigh */
	if (params.atap_ata_major > 0x01 &&
	    params.atap_ata_major != 0xffff) {
		for (i = 14; i > 0; i--) {
			if (params.atap_ata_major & (1 << i)) {
				aprint_verbose_dev(drv_dev,
				    "ATA version %d\n", i);
				drvp->ata_vers = i;
				break;
			}
		}
	}
#endif

	/* An ATAPI device is at least PIO mode 3 */
	if (drvp->drive_type == ATA_DRIVET_ATAPI)
		drvp->PIO_mode = 3;

	/*
	 * It's not in the specs, but it seems that some drive
	 * returns 0xffff in atap_extensions when this field is invalid
	 */
	if (params.atap_extensions != 0xffff &&
	    (params.atap_extensions & WDC_EXT_MODES)) {
		/*
		 * XXX some drives report something wrong here (they claim to
		 * support PIO mode 8 !). As mode is coded on 3 bits in
		 * SET FEATURE, limit it to 7 (so limit i to 4).
		 * If higher mode than 7 is found, abort.
		 */
		for (i = 7; i >= 0; i--) {
			if ((params.atap_piomode_supp & (1 << i)) == 0)
				continue;
			if (i > 4)
				return;
			/*
			 * See if mode is accepted.
			 * If the controller can't set its PIO mode,
			 * assume the defaults are good, so don't try
			 * to set it
			 */
			if (atac->atac_set_modes)
				/*
				 * It's OK to poll here, it's fast enough
				 * to not bother waiting for interrupt
				 */
				if (ata_set_mode(drvp, 0x08 | (i + 3),
				   AT_WAIT) != CMD_OK)
					continue;
			if (!printed) {
				aprint_verbose_dev(drv_dev,
				    "drive supports PIO mode %d", i + 3);
				sep = ",";
				printed = 1;
			}
			/*
			 * If controller's driver can't set its PIO mode,
			 * get the higher one for the drive.
			 */
			if (atac->atac_set_modes == NULL ||
			    atac->atac_pio_cap >= i + 3) {
				drvp->PIO_mode = i + 3;
				drvp->PIO_cap = i + 3;
				break;
			}
		}
		if (!printed) {
			/*
			 * We didn't find a valid PIO mode.
			 * Assume the values returned for DMA are buggy too
			 */
			return;
		}
		ata_channel_lock(chp);
		drvp->drive_flags |= ATA_DRIVE_MODE;
		ata_channel_unlock(chp);
		printed = 0;
		for (i = 7; i >= 0; i--) {
			if ((params.atap_dmamode_supp & (1 << i)) == 0)
				continue;
#if NATA_DMA
			if ((atac->atac_cap & ATAC_CAP_DMA) &&
			    atac->atac_set_modes != NULL)
				if (ata_set_mode(drvp, 0x20 | i, AT_WAIT)
				    != CMD_OK)
					continue;
#endif
			if (!printed) {
				aprint_verbose("%s DMA mode %d", sep, i);
				sep = ",";
				printed = 1;
			}
#if NATA_DMA
			if (atac->atac_cap & ATAC_CAP_DMA) {
				if (atac->atac_set_modes != NULL &&
				    atac->atac_dma_cap < i)
					continue;
				drvp->DMA_mode = i;
				drvp->DMA_cap = i;
				ata_channel_lock(chp);
				drvp->drive_flags |= ATA_DRIVE_DMA;
				ata_channel_unlock(chp);
			}
#endif
			break;
		}
		if (params.atap_extensions & WDC_EXT_UDMA_MODES) {
			printed = 0;
			for (i = 7; i >= 0; i--) {
				if ((params.atap_udmamode_supp & (1 << i))
				    == 0)
					continue;
#if NATA_UDMA
				if (atac->atac_set_modes != NULL &&
				    (atac->atac_cap & ATAC_CAP_UDMA))
					if (ata_set_mode(drvp, 0x40 | i,
					    AT_WAIT) != CMD_OK)
						continue;
#endif
				if (!printed) {
					aprint_verbose("%s Ultra-DMA mode %d",
					    sep, i);
					if (i == 2)
						aprint_verbose(" (Ultra/33)");
					else if (i == 4)
						aprint_verbose(" (Ultra/66)");
					else if (i == 5)
						aprint_verbose(" (Ultra/100)");
					else if (i == 6)
						aprint_verbose(" (Ultra/133)");
					sep = ",";
					printed = 1;
				}
#if NATA_UDMA
				if (atac->atac_cap & ATAC_CAP_UDMA) {
					if (atac->atac_set_modes != NULL &&
					    atac->atac_udma_cap < i)
						continue;
					drvp->UDMA_mode = i;
					drvp->UDMA_cap = i;
					ata_channel_lock(chp);
					drvp->drive_flags |= ATA_DRIVE_UDMA;
					ata_channel_unlock(chp);
				}
#endif
				break;
			}
		}
	}

	/* decide whether streaming methods may be used for this drive */
	ata_channel_lock(chp);
	drvp->drive_flags &= ~ATA_DRIVE_NOSTREAM;
	if (drvp->drive_type == ATA_DRIVET_ATAPI) {
		if (atac->atac_cap & ATAC_CAP_ATAPI_NOSTREAM)
			drvp->drive_flags |= ATA_DRIVE_NOSTREAM;
	} else {
		if (atac->atac_cap & ATAC_CAP_ATA_NOSTREAM)
			drvp->drive_flags |= ATA_DRIVE_NOSTREAM;
	}
	ata_channel_unlock(chp);

	/* Try to guess ATA version here, if it didn't get reported */
	if (drvp->ata_vers == 0) {
#if NATA_UDMA
		if (drvp->drive_flags & ATA_DRIVE_UDMA)
			drvp->ata_vers = 4; /* should be at least ATA-4 */
		else
#endif
		if (drvp->PIO_cap > 2)
			drvp->ata_vers = 2; /* should be at least ATA-2 */
	}
	/* config flags may override the probed modes */
	cf_flags = device_cfdata(drv_dev)->cf_flags;
	if (cf_flags & ATA_CONFIG_PIO_SET) {
		ata_channel_lock(chp);
		drvp->PIO_mode =
		    (cf_flags & ATA_CONFIG_PIO_MODES) >> ATA_CONFIG_PIO_OFF;
		drvp->drive_flags |= ATA_DRIVE_MODE;
		ata_channel_unlock(chp);
	}
#if NATA_DMA
	if ((atac->atac_cap & ATAC_CAP_DMA) == 0) {
		/* don't care about DMA modes */
		return;
	}
	if (cf_flags & ATA_CONFIG_DMA_SET) {
		ata_channel_lock(chp);
		if ((cf_flags & ATA_CONFIG_DMA_MODES) ==
		    ATA_CONFIG_DMA_DISABLE) {
			drvp->drive_flags &= ~ATA_DRIVE_DMA;
		} else {
			drvp->DMA_mode = (cf_flags & ATA_CONFIG_DMA_MODES) >>
			    ATA_CONFIG_DMA_OFF;
			drvp->drive_flags |= ATA_DRIVE_DMA | ATA_DRIVE_MODE;
		}
		ata_channel_unlock(chp);
	}

	/*
	 * Probe WRITE DMA FUA EXT. Support is mandatory for devices
	 * supporting LBA48, but nevertheless confirm with the feature flag.
	 */
	if (drvp->drive_flags & ATA_DRIVE_DMA) {
		if ((params.atap_cmd2_en & ATA_CMD2_LBA48) != 0
		    && (params.atap_cmd_def & ATA_CMDE_WFE)) {
			drvp->drive_flags |= ATA_DRIVE_WFUA;
			aprint_verbose("%s WRITE DMA FUA", sep);
			sep = ",";
		}
	}

	/* Probe NCQ support - READ/WRITE FPDMA QUEUED command support */
	ata_channel_lock(chp);
	drvp->drv_openings = 1;
	if (params.atap_sata_caps & SATA_NATIVE_CMDQ) {
		if (atac->atac_cap & ATAC_CAP_NCQ)
			drvp->drive_flags |= ATA_DRIVE_NCQ;
		drvp->drv_openings =
		    (params.atap_queuedepth & WDC_QUEUE_DEPTH_MASK) + 1;
		aprint_verbose("%s NCQ (%d tags)", sep, drvp->drv_openings);
		sep = ",";

		if (params.atap_sata_caps & SATA_NCQ_PRIO) {
			drvp->drive_flags |= ATA_DRIVE_NCQ_PRIO;
			aprint_verbose(" w/PRIO");
		}
	}
	ata_channel_unlock(chp);

	if (printed)
		aprint_verbose("\n");

#if NATA_UDMA
	if ((atac->atac_cap & ATAC_CAP_UDMA) == 0) {
		/* don't care about UDMA modes */
		return;
	}
	if (cf_flags & ATA_CONFIG_UDMA_SET) {
		ata_channel_lock(chp);
		if ((cf_flags & ATA_CONFIG_UDMA_MODES) ==
		    ATA_CONFIG_UDMA_DISABLE) {
			drvp->drive_flags &= ~ATA_DRIVE_UDMA;
		} else {
			drvp->UDMA_mode = (cf_flags & ATA_CONFIG_UDMA_MODES) >>
			    ATA_CONFIG_UDMA_OFF;
			drvp->drive_flags |= ATA_DRIVE_UDMA | ATA_DRIVE_MODE;
		}
		ata_channel_unlock(chp);
	}
#endif	/* NATA_UDMA */
#endif	/* NATA_DMA */
}
2066 
2067 /* management of the /dev/atabus* devices */
2068 int
2069 atabusopen(dev_t dev, int flag, int fmt, struct lwp *l)
2070 {
2071 	struct atabus_softc *sc;
2072 	int error;
2073 
2074 	sc = device_lookup_private(&atabus_cd, minor(dev));
2075 	if (sc == NULL)
2076 		return (ENXIO);
2077 
2078 	if (sc->sc_flags & ATABUSCF_OPEN)
2079 		return (EBUSY);
2080 
2081 	if ((error = ata_addref(sc->sc_chan)) != 0)
2082 		return (error);
2083 
2084 	sc->sc_flags |= ATABUSCF_OPEN;
2085 
2086 	return (0);
2087 }
2088 
2089 
2090 int
2091 atabusclose(dev_t dev, int flag, int fmt, struct lwp *l)
2092 {
2093 	struct atabus_softc *sc =
2094 	    device_lookup_private(&atabus_cd, minor(dev));
2095 
2096 	ata_delref(sc->sc_chan);
2097 
2098 	sc->sc_flags &= ~ATABUSCF_OPEN;
2099 
2100 	return (0);
2101 }
2102 
/*
 * ioctl handler for /dev/atabus*: supports channel reset
 * (ATABUSIORESET) and drive detach (ATABUSIODETACH); rescanning
 * (ATABUSIOSCAN) is currently not implemented.
 */
int
atabusioctl(dev_t dev, u_long cmd, void *addr, int flag, struct lwp *l)
{
	struct atabus_softc *sc =
	    device_lookup_private(&atabus_cd, minor(dev));
	struct ata_channel *chp = sc->sc_chan;
	int min_drive, max_drive, drive;
	int error;
	int s;

	/*
	 * Enforce write permission for ioctls that change the
	 * state of the bus.  Host adapter specific ioctls must
	 * be checked by the adapter driver.
	 */
	switch (cmd) {
	case ATABUSIOSCAN:
	case ATABUSIODETACH:
	case ATABUSIORESET:
		if ((flag & FWRITE) == 0)
			return (EBADF);
	}

	switch (cmd) {
	case ATABUSIORESET:
		s = splbio();
		ata_reset_channel(sc->sc_chan, AT_WAIT | AT_POLL);
		splx(s);
		return 0;
	case ATABUSIOSCAN:
	{
#if 0
		struct atabusioscan_args *a=
		    (struct atabusioscan_args *)addr;
#endif
		/* NOTE(review): both paths return EOPNOTSUPP - scanning
		 * looks unimplemented; the drive_type checks appear to be
		 * leftovers of a planned implementation */
		if ((chp->ch_drive[0].drive_type == ATA_DRIVET_OLD) ||
		    (chp->ch_drive[1].drive_type == ATA_DRIVET_OLD))
			return (EOPNOTSUPP);
		return (EOPNOTSUPP);
	}
	case ATABUSIODETACH:
	{
		struct atabusiodetach_args *a=
		    (struct atabusiodetach_args *)addr;
		/* old (pre-ATA) drives cannot be detached this way */
		if ((chp->ch_drive[0].drive_type == ATA_DRIVET_OLD) ||
		    (chp->ch_drive[1].drive_type == ATA_DRIVET_OLD))
			return (EOPNOTSUPP);
		/* at_dev == -1 means "all drives", otherwise a single unit */
		switch (a->at_dev) {
		case -1:
			min_drive = 0;
			max_drive = 1;
			break;
		case 0:
		case 1:
			min_drive = max_drive = a->at_dev;
			break;
		default:
			return (EINVAL);
		}
		for (drive = min_drive; drive <= max_drive; drive++) {
			if (chp->ch_drive[drive].drv_softc != NULL) {
				error = config_detach(
				    chp->ch_drive[drive].drv_softc, 0);
				if (error)
					return (error);
				KASSERT(chp->ch_drive[drive].drv_softc == NULL);
			}
		}
		return 0;
	}
	default:
		return ENOTTY;
	}
}
2177 
2178 static bool
2179 atabus_suspend(device_t dv, const pmf_qual_t *qual)
2180 {
2181 	struct atabus_softc *sc = device_private(dv);
2182 	struct ata_channel *chp = sc->sc_chan;
2183 
2184 	ata_channel_idle(chp);
2185 
2186 	return true;
2187 }
2188 
2189 static bool
2190 atabus_resume(device_t dv, const pmf_qual_t *qual)
2191 {
2192 	struct atabus_softc *sc = device_private(dv);
2193 	struct ata_channel *chp = sc->sc_chan;
2194 
2195 	/*
2196 	 * XXX joerg: with wdc, the first channel unfreezes the controler.
2197 	 * Move this the reset and queue idling into wdc.
2198 	 */
2199 	ata_channel_lock(chp);
2200 	if (chp->ch_queue->queue_freeze == 0) {
2201 		ata_channel_unlock(chp);
2202 		goto out;
2203 	}
2204 
2205 	/* unfreeze the queue and reset drives */
2206 	ata_channel_thaw_locked(chp);
2207 
2208 	ata_channel_unlock(chp);
2209 
2210 	/* reset channel only if there are drives attached */
2211 	if (chp->ch_ndrives > 0)
2212 		ata_reset_channel(chp, AT_WAIT);
2213 
2214 out:
2215 	return true;
2216 }
2217 
2218 static int
2219 atabus_rescan(device_t self, const char *ifattr, const int *locators)
2220 {
2221 	struct atabus_softc *sc = device_private(self);
2222 	struct ata_channel *chp = sc->sc_chan;
2223 	struct atabus_initq *initq;
2224 	int i;
2225 
2226 	/*
2227 	 * we can rescan a port multiplier atabus, even if some devices are
2228 	 * still attached
2229 	 */
2230 	if (chp->ch_satapmp_nports == 0) {
2231 		if (chp->atapibus != NULL) {
2232 			return EBUSY;
2233 		}
2234 
2235 		KASSERT(chp->ch_ndrives == 0 || chp->ch_drive != NULL);
2236 		for (i = 0; i < chp->ch_ndrives; i++) {
2237 			if (chp->ch_drive[i].drv_softc != NULL) {
2238 				return EBUSY;
2239 			}
2240 		}
2241 	}
2242 
2243 	initq = malloc(sizeof(*initq), M_DEVBUF, M_WAITOK);
2244 	initq->atabus_sc = sc;
2245 	mutex_enter(&atabus_qlock);
2246 	TAILQ_INSERT_TAIL(&atabus_initq_head, initq, atabus_initq);
2247 	mutex_exit(&atabus_qlock);
2248 	config_pending_incr(sc->sc_dev);
2249 
2250 	ata_channel_lock(chp);
2251 	chp->ch_flags |= ATACH_TH_RESCAN;
2252 	cv_signal(&chp->ch_thr_idle);
2253 	ata_channel_unlock(chp);
2254 
2255 	return 0;
2256 }
2257 
2258 void
2259 ata_delay(struct ata_channel *chp, int ms, const char *msg, int flags)
2260 {
2261 
2262 	if ((flags & (AT_WAIT | AT_POLL)) == AT_POLL) {
2263 		/*
2264 		 * can't use kpause(), we may be in interrupt context
2265 		 * or taking a crash dump
2266 		 */
2267 		delay(ms * 1000);
2268 	} else {
2269 		int pause = mstohz(ms);
2270 
2271 		KASSERT(mutex_owned(&chp->ch_lock));
2272 		kpause(msg, false, pause > 0 ? pause : 1, &chp->ch_lock);
2273 	}
2274 }
2275 
2276 void
2277 atacmd_toncq(struct ata_xfer *xfer, uint8_t *cmd, uint16_t *count,
2278     uint16_t *features, uint8_t *device)
2279 {
2280 	if ((xfer->c_flags & C_NCQ) == 0) {
2281 		/* FUA handling for non-NCQ drives */
2282 		if (xfer->c_bio.flags & ATA_FUA
2283 		    && *cmd == WDCC_WRITEDMA_EXT)
2284 			*cmd = WDCC_WRITEDMA_FUA_EXT;
2285 
2286 		return;
2287 	}
2288 
2289 	*cmd = (xfer->c_bio.flags & ATA_READ) ?
2290 	    WDCC_READ_FPDMA_QUEUED : WDCC_WRITE_FPDMA_QUEUED;
2291 
2292 	/* for FPDMA the block count is in features */
2293 	*features = *count;
2294 
2295 	/* NCQ tag */
2296 	*count = (xfer->c_slot << 3);
2297 
2298 	if (xfer->c_bio.flags & ATA_PRIO_HIGH)
2299 		*count |= WDSC_PRIO_HIGH;
2300 
2301 	/* other device flags */
2302 	if (xfer->c_bio.flags & ATA_FUA)
2303 		*device |= WDSD_FUA;
2304 }
2305 
/*
 * Block until the given xfer is signalled finished via ata_wake_xfer().
 * Must be called with the channel lock held; cv_wait() releases and
 * re-acquires ch_lock around the sleep.
 */
void
ata_wait_xfer(struct ata_channel *chp, struct ata_xfer *xfer)
{
	KASSERT(mutex_owned(&chp->ch_lock));

	cv_wait(&xfer->c_finish, &chp->ch_lock);
}
2313 
/*
 * Wake a thread sleeping in ata_wait_xfer() on this xfer.  Must be
 * called with the channel lock held.
 */
void
ata_wake_xfer(struct ata_channel *chp, struct ata_xfer *xfer)
{
	KASSERT(mutex_owned(&chp->ch_lock));

	cv_signal(&xfer->c_finish);
}
2321