xref: /netbsd-src/sys/dev/ic/mvsata.c (revision d909946ca08dceb44d7d0f22ec9488679695d976)
1 /*	$NetBSD: mvsata.c,v 1.35 2016/05/02 19:18:29 christos Exp $	*/
2 /*
3  * Copyright (c) 2008 KIYOHARA Takashi
4  * All rights reserved.
5  *
6  * Redistribution and use in source and binary forms, with or without
7  * modification, are permitted provided that the following conditions
8  * are met:
9  * 1. Redistributions of source code must retain the above copyright
10  *    notice, this list of conditions and the following disclaimer.
11  * 2. Redistributions in binary form must reproduce the above copyright
12  *    notice, this list of conditions and the following disclaimer in the
13  *    documentation and/or other materials provided with the distribution.
14  *
15  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
16  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
17  * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
18  * DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT,
19  * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
20  * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
21  * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
22  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
23  * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
24  * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
25  * POSSIBILITY OF SUCH DAMAGE.
26  */
27 
28 #include <sys/cdefs.h>
29 __KERNEL_RCSID(0, "$NetBSD: mvsata.c,v 1.35 2016/05/02 19:18:29 christos Exp $");
30 
31 #include "opt_mvsata.h"
32 
33 /* ATAPI implementation not finished. */
34 //#include "atapibus.h"
35 
36 #include <sys/param.h>
37 #if NATAPIBUS > 0
38 #include <sys/buf.h>
39 #endif
40 #include <sys/bus.h>
41 #include <sys/cpu.h>
42 #include <sys/device.h>
43 #include <sys/disklabel.h>
44 #include <sys/errno.h>
45 #include <sys/kernel.h>
46 #include <sys/malloc.h>
47 #include <sys/proc.h>
48 
49 #include <machine/vmparam.h>
50 
51 #include <dev/ata/atareg.h>
52 #include <dev/ata/atavar.h>
53 #include <dev/ic/wdcvar.h>
54 #include <dev/ata/satapmpreg.h>
55 #include <dev/ata/satareg.h>
56 #include <dev/ata/satavar.h>
57 
58 #if NATAPIBUS > 0
59 #include <dev/scsipi/scsi_all.h>	/* for SCSI status */
60 #endif
61 
62 #include <dev/pci/pcidevs.h>
63 
64 #include <dev/ic/mvsatareg.h>
65 #include <dev/ic/mvsatavar.h>
66 
67 
68 #define MVSATA_DEV(sc)		((sc)->sc_wdcdev.sc_atac.atac_dev)
69 #define MVSATA_DEV2(mvport)	((mvport)->port_ata_channel.ch_atac->atac_dev)
70 
71 #define MVSATA_HC_READ_4(hc, reg) \
72 	bus_space_read_4((hc)->hc_iot, (hc)->hc_ioh, (reg))
73 #define MVSATA_HC_WRITE_4(hc, reg, val) \
74 	bus_space_write_4((hc)->hc_iot, (hc)->hc_ioh, (reg), (val))
75 #define MVSATA_EDMA_READ_4(mvport, reg) \
76 	bus_space_read_4((mvport)->port_iot, (mvport)->port_ioh, (reg))
77 #define MVSATA_EDMA_WRITE_4(mvport, reg, val) \
78 	bus_space_write_4((mvport)->port_iot, (mvport)->port_ioh, (reg), (val))
79 #define MVSATA_WDC_READ_2(mvport, reg) \
80 	bus_space_read_2((mvport)->port_iot, (mvport)->port_ioh, \
81 	SHADOW_REG_BLOCK_OFFSET + (reg))
82 #define MVSATA_WDC_READ_1(mvport, reg) \
83 	bus_space_read_1((mvport)->port_iot, (mvport)->port_ioh, \
84 	SHADOW_REG_BLOCK_OFFSET + (reg))
85 #define MVSATA_WDC_WRITE_2(mvport, reg, val) \
86 	bus_space_write_2((mvport)->port_iot, (mvport)->port_ioh, \
87 	SHADOW_REG_BLOCK_OFFSET + (reg), (val))
88 #define MVSATA_WDC_WRITE_1(mvport, reg, val) \
89 	bus_space_write_1((mvport)->port_iot, (mvport)->port_ioh, \
90 	SHADOW_REG_BLOCK_OFFSET + (reg), (val))
91 
92 #ifdef MVSATA_DEBUG
93 #define DPRINTF(x)	if (mvsata_debug) printf x
94 #define	DPRINTFN(n,x)	if (mvsata_debug >= (n)) printf x
95 int	mvsata_debug = 2;
96 #else
97 #define DPRINTF(x)
98 #define DPRINTFN(n,x)
99 #endif
100 
101 #define ATA_DELAY		10000	/* 10s for a drive I/O */
102 #define ATAPI_DELAY		10	/* 10 ms, this is used only before
103 					   sending a cmd */
104 #define ATAPI_MODE_DELAY	1000	/* 1s, timeout for SET_FEATURE cmds */
105 
106 #define MVSATA_EPRD_MAX_SIZE	(sizeof(struct eprd) * (MAXPHYS / PAGE_SIZE))
107 
108 
109 static void mvsata_probe_drive(struct ata_channel *);
110 #ifndef MVSATA_WITHOUTDMA
111 static int mvsata_bio(struct ata_drive_datas *, struct ata_bio *);
112 static void mvsata_reset_drive(struct ata_drive_datas *, int, uint32_t *);
113 static void mvsata_reset_channel(struct ata_channel *, int);
114 static int mvsata_exec_command(struct ata_drive_datas *, struct ata_command *);
115 static int mvsata_addref(struct ata_drive_datas *);
116 static void mvsata_delref(struct ata_drive_datas *);
117 static void mvsata_killpending(struct ata_drive_datas *);
118 
119 #if NATAPIBUS > 0
120 static void mvsata_atapibus_attach(struct atabus_softc *);
121 static void mvsata_atapi_scsipi_request(struct scsipi_channel *,
122 					scsipi_adapter_req_t, void *);
123 static void mvsata_atapi_minphys(struct buf *);
124 static void mvsata_atapi_probe_device(struct atapibus_softc *, int);
125 static void mvsata_atapi_kill_pending(struct scsipi_periph *);
126 #endif
127 #endif
128 
129 static void mvsata_setup_channel(struct ata_channel *);
130 
131 #ifndef MVSATA_WITHOUTDMA
132 static void mvsata_bio_start(struct ata_channel *, struct ata_xfer *);
133 static int mvsata_bio_intr(struct ata_channel *, struct ata_xfer *, int);
134 static void mvsata_bio_kill_xfer(struct ata_channel *, struct ata_xfer *, int);
135 static void mvsata_bio_done(struct ata_channel *, struct ata_xfer *);
136 static int mvsata_bio_ready(struct mvsata_port *, struct ata_bio *, int,
137 			    int);
138 static void mvsata_wdc_cmd_start(struct ata_channel *, struct ata_xfer *);
139 static int mvsata_wdc_cmd_intr(struct ata_channel *, struct ata_xfer *, int);
140 static void mvsata_wdc_cmd_kill_xfer(struct ata_channel *, struct ata_xfer *,
141 				     int);
142 static void mvsata_wdc_cmd_done(struct ata_channel *, struct ata_xfer *);
143 static void mvsata_wdc_cmd_done_end(struct ata_channel *, struct ata_xfer *);
144 #if NATAPIBUS > 0
145 static void mvsata_atapi_start(struct ata_channel *, struct ata_xfer *);
146 static int mvsata_atapi_intr(struct ata_channel *, struct ata_xfer *, int);
147 static void mvsata_atapi_kill_xfer(struct ata_channel *, struct ata_xfer *,
148 				   int);
149 static void mvsata_atapi_reset(struct ata_channel *, struct ata_xfer *);
150 static void mvsata_atapi_phase_complete(struct ata_xfer *);
151 static void mvsata_atapi_done(struct ata_channel *, struct ata_xfer *);
152 static void mvsata_atapi_polldsc(void *);
153 #endif
154 
155 static int mvsata_edma_enqueue(struct mvsata_port *, struct ata_bio *, void *);
156 static int mvsata_edma_handle(struct mvsata_port *, struct ata_xfer *);
157 static int mvsata_edma_wait(struct mvsata_port *, struct ata_xfer *, int);
158 static void mvsata_edma_timeout(void *);
159 static void mvsata_edma_rqq_remove(struct mvsata_port *, struct ata_xfer *);
160 #if NATAPIBUS > 0
161 static int mvsata_bdma_init(struct mvsata_port *, struct scsipi_xfer *, void *);
162 static void mvsata_bdma_start(struct mvsata_port *);
163 #endif
164 #endif
165 
166 static int mvsata_port_init(struct mvsata_hc *, int);
167 static int mvsata_wdc_reg_init(struct mvsata_port *, struct wdc_regs *);
168 #ifndef MVSATA_WITHOUTDMA
169 static inline void mvsata_quetag_init(struct mvsata_port *);
170 static inline int mvsata_quetag_get(struct mvsata_port *);
171 static inline void mvsata_quetag_put(struct mvsata_port *, int);
172 static void *mvsata_edma_resource_prepare(struct mvsata_port *, bus_dma_tag_t,
173 					  bus_dmamap_t *, size_t, int);
174 static void mvsata_edma_resource_purge(struct mvsata_port *, bus_dma_tag_t,
175 				       bus_dmamap_t, void *);
176 static int mvsata_dma_bufload(struct mvsata_port *, int, void *, size_t, int);
177 static inline void mvsata_dma_bufunload(struct mvsata_port *, int, int);
178 #endif
179 
180 static void mvsata_hreset_port(struct mvsata_port *);
181 static void mvsata_reset_port(struct mvsata_port *);
182 static void mvsata_reset_hc(struct mvsata_hc *);
183 static uint32_t mvsata_softreset(struct mvsata_port *, int);
184 #ifndef MVSATA_WITHOUTDMA
185 static void mvsata_edma_reset_qptr(struct mvsata_port *);
186 static inline void mvsata_edma_enable(struct mvsata_port *);
187 static int mvsata_edma_disable(struct mvsata_port *, int, int);
188 static void mvsata_edma_config(struct mvsata_port *, int);
189 
190 static void mvsata_edma_setup_crqb(struct mvsata_port *, int, int,
191 				   struct ata_bio  *);
192 #endif
193 static uint32_t mvsata_read_preamps_gen1(struct mvsata_port *);
194 static void mvsata_fix_phy_gen1(struct mvsata_port *);
195 static void mvsata_devconn_gen1(struct mvsata_port *);
196 
197 static uint32_t mvsata_read_preamps_gen2(struct mvsata_port *);
198 static void mvsata_fix_phy_gen2(struct mvsata_port *);
199 #ifndef MVSATA_WITHOUTDMA
200 static void mvsata_edma_setup_crqb_gen2e(struct mvsata_port *, int, int,
201 					 struct ata_bio  *);
202 
203 #ifdef MVSATA_DEBUG
204 static void mvsata_print_crqb(struct mvsata_port *, int);
205 static void mvsata_print_crpb(struct mvsata_port *, int);
206 static void mvsata_print_eprd(struct mvsata_port *, int);
207 #endif
208 
/*
 * ATA bus entry points handed to the midlayer.  Positional initializer
 * for struct ata_bustype: bus type tag, then the driver's bio, reset,
 * exec-command, get-params, refcount and kill-pending hooks (layout is
 * defined in <dev/ata/atavar.h>).
 */
struct ata_bustype mvsata_ata_bustype = {
	SCSIPI_BUSTYPE_ATA,
	mvsata_bio,
	mvsata_reset_drive,
	mvsata_reset_channel,
	mvsata_exec_command,
	ata_get_params,
	mvsata_addref,
	mvsata_delref,
	mvsata_killpending
};
220 
221 #if NATAPIBUS > 0
/*
 * scsipi bus operations for the ATAPI personality of this controller:
 * generic atapi helpers plus our channel-specific kill-pending hook.
 */
static const struct scsipi_bustype mvsata_atapi_bustype = {
	SCSIPI_BUSTYPE_ATAPI,
	atapi_scsipi_cmd,
	atapi_interpret_sense,
	atapi_print_addr,
	mvsata_atapi_kill_pending,
	NULL,
};
230 #endif /* NATAPIBUS */
231 #endif
232 
233 static void
234 mvsata_pmp_select(struct mvsata_port *mvport, int pmpport)
235 {
236 	uint32_t ifctl;
237 
238 	KASSERT(pmpport < PMP_MAX_DRIVES);
239 #if defined(DIAGNOSTIC) || defined(MVSATA_DEBUG)
240 	if ((MVSATA_EDMA_READ_4(mvport, EDMA_CMD) & EDMA_CMD_EENEDMA) != 0) {
241 		panic("EDMA enabled");
242 	}
243 #endif
244 
245 	ifctl = MVSATA_EDMA_READ_4(mvport, SATA_SATAICTL);
246 	ifctl &= ~0xf;
247 	ifctl |= pmpport;
248 	MVSATA_EDMA_WRITE_4(mvport, SATA_SATAICTL, ifctl);
249 }
250 
/*
 * Attach the MVSATA controller.  Prints the chip description, installs
 * generation-specific PHY and EDMA CRQB helpers, fills in the shared
 * wdc/atac capability fields, initializes every host controller (hc)
 * and port, runs the bus front-end's optional chip-level reset hooks,
 * and finally attaches one wdc channel per port.
 *
 * mvsata_sreset/mvsata_misc_reset: optional chip reset callbacks
 * supplied by the bus attachment (may be NULL).
 * read_pre_amps: nonzero to read PHY pre-amp values back from hardware.
 *
 * Returns 0 on success, ENOMEM if the wdc register array cannot be
 * allocated.
 */
int
mvsata_attach(struct mvsata_softc *sc, struct mvsata_product *product,
	      int (*mvsata_sreset)(struct mvsata_softc *),
	      int (*mvsata_misc_reset)(struct mvsata_softc *),
	      int read_pre_amps)
{
	struct mvsata_hc *mvhc;
	struct mvsata_port *mvport;
	uint32_t (*read_preamps)(struct mvsata_port *) = NULL;
	void (*_fix_phy)(struct mvsata_port *) = NULL;
#ifndef MVSATA_WITHOUTDMA
	void (*edma_setup_crqb)
	    (struct mvsata_port *, int, int, struct ata_bio *) = NULL;
#endif
	int hc, port, channel;

	aprint_normal_dev(MVSATA_DEV(sc), "Gen%s, %dhc, %dport/hc\n",
	    (product->generation == gen1) ? "I" :
	    ((product->generation == gen2) ? "II" : "IIe"),
	    product->hc, product->port);


	/* Pick generation-specific helpers; GenI ignores the sreset hook. */
	switch (product->generation) {
	case gen1:
		mvsata_sreset = NULL;
		read_pre_amps = 1;	/* MUST */
		read_preamps = mvsata_read_preamps_gen1;
		_fix_phy = mvsata_fix_phy_gen1;
#ifndef MVSATA_WITHOUTDMA
		edma_setup_crqb = mvsata_edma_setup_crqb;
#endif
		break;

	case gen2:
		read_preamps = mvsata_read_preamps_gen2;
		_fix_phy = mvsata_fix_phy_gen2;
#ifndef MVSATA_WITHOUTDMA
		edma_setup_crqb = mvsata_edma_setup_crqb;
#endif
		break;

	case gen2e:
		read_preamps = mvsata_read_preamps_gen2;
		_fix_phy = mvsata_fix_phy_gen2;
#ifndef MVSATA_WITHOUTDMA
		edma_setup_crqb = mvsata_edma_setup_crqb_gen2e;
#endif
		break;
	}

	/* Record chip geometry and flags in the softc. */
	sc->sc_gen = product->generation;
	sc->sc_hc = product->hc;
	sc->sc_port = product->port;
	sc->sc_flags = product->flags;

	/* Advertise transfer capabilities (PIO only without DMA support). */
#ifdef MVSATA_WITHOUTDMA
	sc->sc_wdcdev.sc_atac.atac_cap |= ATAC_CAP_DATA16;
#else
	sc->sc_edma_setup_crqb = edma_setup_crqb;
	sc->sc_wdcdev.sc_atac.atac_cap |=
	    (ATAC_CAP_DATA16 | ATAC_CAP_DMA | ATAC_CAP_UDMA);
#endif
	sc->sc_wdcdev.sc_atac.atac_pio_cap = 4;
#ifdef MVSATA_WITHOUTDMA
	sc->sc_wdcdev.sc_atac.atac_dma_cap = 0;
	sc->sc_wdcdev.sc_atac.atac_udma_cap = 0;
#else
	sc->sc_wdcdev.sc_atac.atac_dma_cap = 2;
	sc->sc_wdcdev.sc_atac.atac_udma_cap = 6;
#endif
	sc->sc_wdcdev.sc_atac.atac_channels = sc->sc_ata_channels;
	sc->sc_wdcdev.sc_atac.atac_nchannels = sc->sc_hc * sc->sc_port;
#ifndef MVSATA_WITHOUTDMA
	sc->sc_wdcdev.sc_atac.atac_bustype_ata = &mvsata_ata_bustype;
#if NATAPIBUS > 0
	sc->sc_wdcdev.sc_atac.atac_atapibus_attach = mvsata_atapibus_attach;
#endif
#endif
	sc->sc_wdcdev.wdc_maxdrives = 1;	/* SATA is always 1 drive */
	sc->sc_wdcdev.sc_atac.atac_probe = mvsata_probe_drive;
	sc->sc_wdcdev.sc_atac.atac_set_modes = mvsata_setup_channel;

	/* One wdc register block per channel (hc * port of them). */
	sc->sc_wdc_regs =
	    malloc(sizeof(struct wdc_regs) * product->hc * product->port,
	    M_DEVBUF, M_NOWAIT);
	if (sc->sc_wdc_regs == NULL) {
		aprint_error_dev(MVSATA_DEV(sc),
		    "can't allocate wdc regs memory\n");
		return ENOMEM;
	}
	sc->sc_wdcdev.regs = sc->sc_wdc_regs;

	/* Map and initialize each host controller and its ports. */
	for (hc = 0; hc < sc->sc_hc; hc++) {
		mvhc = &sc->sc_hcs[hc];
		mvhc->hc = hc;
		mvhc->hc_sc = sc;
		mvhc->hc_iot = sc->sc_iot;
		if (bus_space_subregion(sc->sc_iot, sc->sc_ioh,
		    hc * SATAHC_REGISTER_SIZE, SATAHC_REGISTER_SIZE,
		    &mvhc->hc_ioh)) {
			aprint_error_dev(MVSATA_DEV(sc),
			    "can't subregion SATAHC %d registers\n", hc);
			continue;
		}

		for (port = 0; port < sc->sc_port; port++)
			if (mvsata_port_init(mvhc, port) == 0) {
				int pre_amps;

				/* 0x720 is the default when not reading back. */
				mvport = mvhc->hc_ports[port];
				pre_amps = read_pre_amps ?
				    read_preamps(mvport) : 0x00000720;
				mvport->_fix_phy_param.pre_amps = pre_amps;
				mvport->_fix_phy_param._fix_phy = _fix_phy;

				/* No chip-level sreset: reset per port here. */
				if (!mvsata_sreset)
					mvsata_reset_port(mvport);
			}

		if (!mvsata_sreset)
			mvsata_reset_hc(mvhc);
	}
	/* Chip-level resets supplied by the bus front-end, if any. */
	if (mvsata_sreset)
		mvsata_sreset(sc);

	if (mvsata_misc_reset)
		mvsata_misc_reset(sc);

	/* After a chip sreset the per-port PHY fixup must be re-applied. */
	for (hc = 0; hc < sc->sc_hc; hc++)
		for (port = 0; port < sc->sc_port; port++) {
			mvport = sc->sc_hcs[hc].hc_ports[port];
			if (mvport == NULL)
				continue;
			if (mvsata_sreset)
				mvport->_fix_phy_param._fix_phy(mvport);
		}
	for (channel = 0; channel < sc->sc_hc * sc->sc_port; channel++)
		wdcattach(sc->sc_ata_channels[channel]);

	return 0;
}
392 
393 int
394 mvsata_intr(struct mvsata_hc *mvhc)
395 {
396 	struct mvsata_softc *sc = mvhc->hc_sc;
397 	struct mvsata_port *mvport;
398 	uint32_t cause;
399 	int port, handled = 0;
400 
401 	cause = MVSATA_HC_READ_4(mvhc, SATAHC_IC);
402 
403 	DPRINTFN(3, ("%s:%d: mvsata_intr: cause=0x%08x\n",
404 	    device_xname(MVSATA_DEV(sc)), mvhc->hc, cause));
405 
406 	if (cause & SATAHC_IC_SAINTCOAL)
407 		MVSATA_HC_WRITE_4(mvhc, SATAHC_IC, ~SATAHC_IC_SAINTCOAL);
408 	cause &= ~SATAHC_IC_SAINTCOAL;
409 	for (port = 0; port < sc->sc_port; port++) {
410 		mvport = mvhc->hc_ports[port];
411 
412 		if (cause & SATAHC_IC_DONE(port)) {
413 #ifndef MVSATA_WITHOUTDMA
414 			handled = mvsata_edma_handle(mvport, NULL);
415 #endif
416 			MVSATA_HC_WRITE_4(mvhc, SATAHC_IC,
417 			    ~SATAHC_IC_DONE(port));
418 		}
419 
420 		if (cause & SATAHC_IC_SADEVINTERRUPT(port)) {
421 			wdcintr(&mvport->port_ata_channel);
422 			MVSATA_HC_WRITE_4(mvhc, SATAHC_IC,
423 			    ~SATAHC_IC_SADEVINTERRUPT(port));
424 			handled = 1;
425 		}
426 	}
427 
428 	return handled;
429 }
430 
/*
 * Handle an EDMA interrupt-error-cause event for one port.  Acks the
 * hardware cause bits (SATA_SE / SATA_FISIC first, then EDMA_IEC, in
 * the order the chip requires), reports device connect/disconnect,
 * restarts the EDMA engine after a self-disable, and flags a possible
 * port-multiplier hot plug.  Returns nonzero iff any unmasked cause
 * bit was set.
 */
int
mvsata_error(struct mvsata_port *mvport)
{
	struct mvsata_softc *sc = device_private(MVSATA_DEV2(mvport));
	uint32_t cause;

	cause = MVSATA_EDMA_READ_4(mvport, EDMA_IEC);
	/*
	 * We must ack SATA_SE and SATA_FISIC before acking corresponding bits
	 * in EDMA_IEC.
	 */
	if (cause & EDMA_IE_SERRINT) {
		MVSATA_EDMA_WRITE_4(mvport, SATA_SE,
		    MVSATA_EDMA_READ_4(mvport, SATA_SEIM));
	}
	if (cause & EDMA_IE_ETRANSINT) {
		MVSATA_EDMA_WRITE_4(mvport, SATA_FISIC,
		    ~MVSATA_EDMA_READ_4(mvport, SATA_FISIM));
	}
	MVSATA_EDMA_WRITE_4(mvport, EDMA_IEC, ~cause);

	DPRINTFN(3, ("%s:%d:%d:"
	    " mvsata_error: cause=0x%08x, mask=0x%08x, status=0x%08x\n",
	    device_xname(MVSATA_DEV2(mvport)), mvport->port_hc->hc,
	    mvport->port, cause, MVSATA_EDMA_READ_4(mvport, EDMA_IEM),
	    MVSATA_EDMA_READ_4(mvport, EDMA_S)));

	/* Only act on causes that are not masked out. */
	cause &= MVSATA_EDMA_READ_4(mvport, EDMA_IEM);
	if (!cause)
		return 0;

	if (cause & EDMA_IE_EDEVDIS) {
		aprint_normal("%s:%d:%d: device disconnect\n",
		    device_xname(MVSATA_DEV2(mvport)),
		    mvport->port_hc->hc, mvport->port);
	}
	if (cause & EDMA_IE_EDEVCON) {
		/* GenI needs extra PHY work on device connect. */
		if (sc->sc_gen == gen1)
			mvsata_devconn_gen1(mvport);

		DPRINTFN(3, ("    device connected\n"));
	}
#ifndef MVSATA_WITHOUTDMA
	/*
	 * EDMA stopped itself (GenI signals this via ETRANSINT, later
	 * generations via ESELFDIS): restart it if a DMA mode is active.
	 */
	if ((sc->sc_gen == gen1 && cause & EDMA_IE_ETRANSINT) ||
	    (sc->sc_gen != gen1 && cause & EDMA_IE_ESELFDIS)) {
		switch (mvport->port_edmamode) {
		case dma:
		case queued:
		case ncq:
			mvsata_edma_reset_qptr(mvport);
			mvsata_edma_enable(mvport);
			if (cause & EDMA_IE_EDEVERR)
				break;

			/* FALLTHROUGH */

		case nodma:
		default:
			aprint_error(
			    "%s:%d:%d: EDMA self disable happen 0x%x\n",
			    device_xname(MVSATA_DEV2(mvport)),
			    mvport->port_hc->hc, mvport->port, cause);
			break;
		}
	}
#endif
	if (cause & EDMA_IE_ETRANSINT) {
		/* hot plug the Port Multiplier */
		aprint_normal("%s:%d:%d: detect Port Multiplier?\n",
		    device_xname(MVSATA_DEV2(mvport)),
		    mvport->port_hc->hc, mvport->port);
	}

	return 1;
}
506 
507 
508 /*
509  * ATA callback entry points
510  */
511 
512 static void
513 mvsata_probe_drive(struct ata_channel *chp)
514 {
515 	struct mvsata_port * const mvport = (struct mvsata_port *)chp;
516 	uint32_t sstat, sig;
517 
518 	sstat = sata_reset_interface(chp, mvport->port_iot,
519 	    mvport->port_sata_scontrol, mvport->port_sata_sstatus, AT_WAIT);
520 	switch (sstat) {
521 	case SStatus_DET_DEV:
522 		mvsata_pmp_select(mvport, PMP_PORT_CTL);
523 		sig = mvsata_softreset(mvport, AT_WAIT);
524 		sata_interpret_sig(chp, 0, sig);
525 		break;
526 	default:
527 		break;
528 	}
529 }
530 
531 #ifndef MVSATA_WITHOUTDMA
532 static int
533 mvsata_bio(struct ata_drive_datas *drvp, struct ata_bio *ata_bio)
534 {
535 	struct ata_channel *chp = drvp->chnl_softc;
536 	struct atac_softc *atac = chp->ch_atac;
537 	struct ata_xfer *xfer;
538 
539 	DPRINTFN(1, ("%s:%d: mvsata_bio: drive=%d, blkno=%" PRId64
540 	    ", bcount=%ld\n", device_xname(atac->atac_dev), chp->ch_channel,
541 	    drvp->drive, ata_bio->blkno, ata_bio->bcount));
542 
543 	xfer = ata_get_xfer(ATAXF_NOSLEEP);
544 	if (xfer == NULL)
545 		return ATACMD_TRY_AGAIN;
546 	if (atac->atac_cap & ATAC_CAP_NOIRQ)
547 		ata_bio->flags |= ATA_POLL;
548 	if (ata_bio->flags & ATA_POLL)
549 		xfer->c_flags |= C_POLL;
550 	if ((drvp->drive_flags & (ATA_DRIVE_DMA | ATA_DRIVE_UDMA)) &&
551 	    (ata_bio->flags & ATA_SINGLE) == 0)
552 		xfer->c_flags |= C_DMA;
553 	xfer->c_drive = drvp->drive;
554 	xfer->c_cmd = ata_bio;
555 	xfer->c_databuf = ata_bio->databuf;
556 	xfer->c_bcount = ata_bio->bcount;
557 	xfer->c_start = mvsata_bio_start;
558 	xfer->c_intr = mvsata_bio_intr;
559 	xfer->c_kill_xfer = mvsata_bio_kill_xfer;
560 	ata_exec_xfer(chp, xfer);
561 	return (ata_bio->flags & ATA_ITSDONE) ? ATACMD_COMPLETE : ATACMD_QUEUED;
562 }
563 
/*
 * Reset one drive: pause EDMA if it is running, select the drive's
 * port-multiplier port, issue a software reset (returning the device
 * signature through sigp when requested), then restart EDMA if it had
 * been enabled on entry.
 */
static void
mvsata_reset_drive(struct ata_drive_datas *drvp, int flags, uint32_t *sigp)
{
	struct ata_channel *chp = drvp->chnl_softc;
	struct mvsata_port *mvport = (struct mvsata_port *)chp;
	uint32_t edma_c;
	uint32_t sig;

	edma_c = MVSATA_EDMA_READ_4(mvport, EDMA_CMD);

	DPRINTF(("%s:%d: mvsata_reset_drive: drive=%d (EDMA %sactive)\n",
	    device_xname(MVSATA_DEV2(mvport)), chp->ch_channel, drvp->drive,
	    (edma_c & EDMA_CMD_EENEDMA) ? "" : "not "));

	/* EDMA must be off before touching the shadow registers. */
	if (edma_c & EDMA_CMD_EENEDMA)
		mvsata_edma_disable(mvport, 10000, flags & AT_WAIT);

	mvsata_pmp_select(mvport, drvp->drive);

	sig = mvsata_softreset(mvport, flags & AT_WAIT);

	if (sigp)
		*sigp = sig;

	/* Restore EDMA state if it was enabled on entry. */
	if (edma_c & EDMA_CMD_EENEDMA) {
		mvsata_edma_reset_qptr(mvport);
		mvsata_edma_enable(mvport);
	}
	return;
}
594 
/*
 * Hard-reset the whole channel (port).  After the port and interface
 * reset, if the link reports a device that failed to communicate
 * (SStatus_DET_DEV_NE) on a GenII+ chip, retry once with GenII speed
 * disabled.  All transfers still tracked in the port request table are
 * killed with KILL_RESET, then EDMA is reconfigured and re-enabled.
 */
static void
mvsata_reset_channel(struct ata_channel *chp, int flags)
{
	struct mvsata_port *mvport = (struct mvsata_port *)chp;
	struct mvsata_softc *sc = device_private(MVSATA_DEV2(mvport));
	struct ata_xfer *xfer;
	uint32_t sstat, ctrl;
	int i;

	DPRINTF(("%s: mvsata_reset_channel: channel=%d\n",
	    device_xname(MVSATA_DEV2(mvport)), chp->ch_channel));

	mvsata_hreset_port(mvport);
	sstat = sata_reset_interface(chp, mvport->port_iot,
	    mvport->port_sata_scontrol, mvport->port_sata_sstatus, flags);

	if (flags & AT_WAIT && sstat == SStatus_DET_DEV_NE &&
	    sc->sc_gen != gen1) {
		/* Downgrade to GenI */
		const uint32_t val = SControl_IPM_NONE | SControl_SPD_ANY |
		    SControl_DET_DISABLE;

		MVSATA_EDMA_WRITE_4(mvport, mvport->port_sata_scontrol, val);

		ctrl = MVSATA_EDMA_READ_4(mvport, SATA_SATAICFG);
		ctrl &= ~(1 << 17);	/* Disable GenII */
		MVSATA_EDMA_WRITE_4(mvport, SATA_SATAICFG, ctrl);

		/* Retry the reset at the lower speed. */
		mvsata_hreset_port(mvport);
		sata_reset_interface(chp, mvport->port_iot,
		    mvport->port_sata_scontrol, mvport->port_sata_sstatus,
		    flags);
	}

	/* Abort every transfer still outstanding in the request table. */
	for (i = 0; i < MVSATA_EDMAQ_LEN; i++) {
		xfer = mvport->port_reqtbl[i].xfer;
		if (xfer == NULL)
			continue;
		chp->ch_queue->active_xfer = xfer;
		xfer->c_kill_xfer(chp, xfer, KILL_RESET);
	}

	/* Bring EDMA back up in the mode the port was using. */
	mvsata_edma_config(mvport, mvport->port_edmamode);
	mvsata_edma_reset_qptr(mvport);
	mvsata_edma_enable(mvport);
	return;
}
642 
643 
/*
 * Queue a raw ATA command.  AT_POLL commands are executed synchronously
 * by ata_exec_xfer(); AT_WAIT callers sleep here until the command
 * completes.  Returns ATACMD_COMPLETE, ATACMD_QUEUED, or
 * ATACMD_TRY_AGAIN when no xfer could be allocated.
 */
static int
mvsata_exec_command(struct ata_drive_datas *drvp, struct ata_command *ata_c)
{
	struct ata_channel *chp = drvp->chnl_softc;
#ifdef MVSATA_DEBUG
	struct mvsata_port *mvport = (struct mvsata_port *)chp;
#endif
	struct ata_xfer *xfer;
	int rv, s;

	DPRINTFN(1, ("%s:%d: mvsata_exec_command: drive=%d, bcount=%d,"
	    " r_lba=0x%012"PRIx64", r_count=0x%04x, r_features=0x%04x,"
	    " r_device=0x%02x, r_command=0x%02x\n",
	    device_xname(MVSATA_DEV2(mvport)), chp->ch_channel,
	    drvp->drive, ata_c->bcount, ata_c->r_lba, ata_c->r_count,
	    ata_c->r_features, ata_c->r_device, ata_c->r_command));

	/* Sleeping for an xfer is only allowed for AT_WAIT callers. */
	xfer = ata_get_xfer(ata_c->flags & AT_WAIT ? ATAXF_CANSLEEP :
	    ATAXF_NOSLEEP);
	if (xfer == NULL)
		return ATACMD_TRY_AGAIN;
	if (ata_c->flags & AT_POLL)
		xfer->c_flags |= C_POLL;
	if (ata_c->flags & AT_WAIT)
		xfer->c_flags |= C_WAIT;
	xfer->c_drive = drvp->drive;
	xfer->c_databuf = ata_c->data;
	xfer->c_bcount = ata_c->bcount;
	xfer->c_cmd = ata_c;
	xfer->c_start = mvsata_wdc_cmd_start;
	xfer->c_intr = mvsata_wdc_cmd_intr;
	xfer->c_kill_xfer = mvsata_wdc_cmd_kill_xfer;
	s = splbio();
	ata_exec_xfer(chp, xfer);
#ifdef DIAGNOSTIC
	/* A polled command must have completed inside ata_exec_xfer(). */
	if ((ata_c->flags & AT_POLL) != 0 &&
	    (ata_c->flags & AT_DONE) == 0)
		panic("mvsata_exec_command: polled command not done");
#endif
	if (ata_c->flags & AT_DONE)
		rv = ATACMD_COMPLETE;
	else {
		/* Command still pending: sleep for it, or report queued. */
		if (ata_c->flags & AT_WAIT) {
			while ((ata_c->flags & AT_DONE) == 0)
				tsleep(ata_c, PRIBIO, "mvsatacmd", 0);
			rv = ATACMD_COMPLETE;
		} else
			rv = ATACMD_QUEUED;
	}
	splx(s);
	return rv;
}
696 
/*
 * ata_addref bus hook: no-op for this driver; always succeeds.
 */
static int
mvsata_addref(struct ata_drive_datas *drvp)
{

	return 0;
}
703 
/*
 * ata_delref bus hook: no-op for this driver.
 */
static void
mvsata_delref(struct ata_drive_datas *drvp)
{

	return;
}
710 
/*
 * ata_killpending bus hook: no-op; nothing driver-specific to cancel.
 */
static void
mvsata_killpending(struct ata_drive_datas *drvp)
{

	return;
}
717 
718 #if NATAPIBUS > 0
719 static void
720 mvsata_atapibus_attach(struct atabus_softc *ata_sc)
721 {
722 	struct ata_channel *chp = ata_sc->sc_chan;
723 	struct atac_softc *atac = chp->ch_atac;
724 	struct scsipi_adapter *adapt = &atac->atac_atapi_adapter._generic;
725 	struct scsipi_channel *chan = &chp->ch_atapi_channel;
726 
727 	/*
728 	 * Fill in the scsipi_adapter.
729 	 */
730 	adapt->adapt_dev = atac->atac_dev;
731 	adapt->adapt_nchannels = atac->atac_nchannels;
732 	adapt->adapt_request = mvsata_atapi_scsipi_request;
733 	adapt->adapt_minphys = mvsata_atapi_minphys;
734 	atac->atac_atapi_adapter.atapi_probe_device = mvsata_atapi_probe_device;
735 
736         /*
737 	 * Fill in the scsipi_channel.
738 	 */
739 	memset(chan, 0, sizeof(*chan));
740 	chan->chan_adapter = adapt;
741 	chan->chan_bustype = &mvsata_atapi_bustype;
742 	chan->chan_channel = chp->ch_channel;
743 	chan->chan_flags = SCSIPI_CHAN_OPENINGS;
744 	chan->chan_openings = 1;
745 	chan->chan_max_periph = 1;
746 	chan->chan_ntargets = 1;
747 	chan->chan_nluns = 1;
748 
749 	chp->atapibus =
750 	    config_found_ia(ata_sc->sc_dev, "atapi", chan, atapiprint);
751 }
752 
/*
 * scsipi adapter request entry point for the ATAPI channel.  Only
 * ADAPTER_REQ_RUN_XFER is supported: wrap the scsipi_xfer in an
 * ata_xfer and queue it on the channel.  All other request types are
 * ignored.
 */
static void
mvsata_atapi_scsipi_request(struct scsipi_channel *chan,
			    scsipi_adapter_req_t req, void *arg)
{
	struct scsipi_adapter *adapt = chan->chan_adapter;
	struct scsipi_periph *periph;
	struct scsipi_xfer *sc_xfer;
	struct mvsata_softc *sc = device_private(adapt->adapt_dev);
	struct atac_softc *atac = &sc->sc_wdcdev.sc_atac;
	struct ata_xfer *xfer;
	int channel = chan->chan_channel;
	int drive, s;

        switch (req) {
	case ADAPTER_REQ_RUN_XFER:
		sc_xfer = arg;
		periph = sc_xfer->xs_periph;
		drive = periph->periph_target;

		/* Fail requests against a detached/inactive controller. */
		if (!device_is_active(atac->atac_dev)) {
			sc_xfer->error = XS_DRIVER_STUFFUP;
			scsipi_done(sc_xfer);
			return;
		}
		xfer = ata_get_xfer(ATAXF_NOSLEEP);
		if (xfer == NULL) {
			sc_xfer->error = XS_RESOURCE_SHORTAGE;
			scsipi_done(sc_xfer);
			return;
		}

		if (sc_xfer->xs_control & XS_CTL_POLL)
			xfer->c_flags |= C_POLL;
		xfer->c_drive = drive;
		xfer->c_flags |= C_ATAPI;
		xfer->c_cmd = sc_xfer;
		xfer->c_databuf = sc_xfer->data;
		xfer->c_bcount = sc_xfer->datalen;
		xfer->c_start = mvsata_atapi_start;
		xfer->c_intr = mvsata_atapi_intr;
		xfer->c_kill_xfer = mvsata_atapi_kill_xfer;
		xfer->c_dscpoll = 0;
		s = splbio();
		ata_exec_xfer(atac->atac_channels[channel], xfer);
#ifdef DIAGNOSTIC
		/* A polled command must complete inside ata_exec_xfer(). */
		if ((sc_xfer->xs_control & XS_CTL_POLL) != 0 &&
		    (sc_xfer->xs_status & XS_STS_DONE) == 0)
			panic("mvsata_atapi_scsipi_request:"
			    " polled command not done");
#endif
		splx(s);
		return;

	default:
		/* Not supported, nothing to do. */
		;
	}
}
811 
812 static void
813 mvsata_atapi_minphys(struct buf *bp)
814 {
815 
816 	if (bp->b_bcount > MAXPHYS)
817 		bp->b_bcount = MAXPHYS;
818 	minphys(bp);
819 }
820 
/*
 * Probe and attach one ATAPI target: skip targets already attached or
 * not detected as ATAPI at bus attach time, issue IDENTIFY via
 * ata_get_params(), build a scsipi_periph (with fake INQUIRY data
 * derived from the IDENTIFY strings), and hand it to
 * atapi_probe_device().  On any failure the drive type is reset to
 * ATA_DRIVET_NONE.
 */
static void
mvsata_atapi_probe_device(struct atapibus_softc *sc, int target)
{
	struct scsipi_channel *chan = sc->sc_channel;
	struct scsipi_periph *periph;
	struct ataparams ids;
	struct ataparams *id = &ids;
	struct mvsata_softc *mvc =
	    device_private(chan->chan_adapter->adapt_dev);
	struct atac_softc *atac = &mvc->sc_wdcdev.sc_atac;
	struct ata_channel *chp = atac->atac_channels[chan->chan_channel];
	struct ata_drive_datas *drvp = &chp->ch_drive[target];
	struct scsipibus_attach_args sa;
	char serial_number[21], model[41], firmware_revision[9];
	int s;

	/* skip if already attached */
	if (scsipi_lookup_periph(chan, target, 0) != NULL)
		return;

	/* if no ATAPI device detected at attach time, skip */
	if (drvp->drive_type != ATA_DRIVET_ATAPI) {
		DPRINTF(("%s:%d: mvsata_atapi_probe_device:"
		    " drive %d not present\n",
		    device_xname(atac->atac_dev), chp->ch_channel, target));
		return;
	}

	/* Some ATAPI devices need a bit more time after software reset. */
	delay(5000);
	if (ata_get_params(drvp, AT_WAIT, id) == 0) {
#ifdef ATAPI_DEBUG_PROBE
		log(LOG_DEBUG, "%s:%d: drive %d: cmdsz 0x%x drqtype 0x%x\n",
		    device_xname(atac->atac_dev), chp->ch_channel, target,
		    id->atap_config & ATAPI_CFG_CMD_MASK,
		    id->atap_config & ATAPI_CFG_DRQ_MASK);
#endif
		periph = scsipi_alloc_periph(M_NOWAIT);
		if (periph == NULL) {
			aprint_error_dev(atac->atac_dev,
			    "unable to allocate periph"
			    " for channel %d drive %d\n",
			    chp->ch_channel, target);
			return;
		}
		periph->periph_dev = NULL;
		periph->periph_channel = chan;
		periph->periph_switch = &atapi_probe_periphsw;
		periph->periph_target = target;
		periph->periph_lun = 0;
		periph->periph_quirks = PQUIRK_ONLYBIG;

#ifdef SCSIPI_DEBUG
		if (SCSIPI_DEBUG_TYPE == SCSIPI_BUSTYPE_ATAPI &&
		    SCSIPI_DEBUG_TARGET == target)
			periph->periph_dbflags |= SCSIPI_DEBUG_FLAGS;
#endif
		periph->periph_type = ATAPI_CFG_TYPE(id->atap_config);
		if (id->atap_config & ATAPI_CFG_REMOV)
			periph->periph_flags |= PERIPH_REMOVABLE;
		/* Tape drives get the DSC-wait flag. */
		if (periph->periph_type == T_SEQUENTIAL) {
			s = splbio();
			drvp->drive_flags |= ATA_DRIVE_ATAPIDSCW;
			splx(s);
		}

		/* Build fake INQUIRY data from the IDENTIFY strings. */
		sa.sa_periph = periph;
		sa.sa_inqbuf.type = ATAPI_CFG_TYPE(id->atap_config);
		sa.sa_inqbuf.removable = id->atap_config & ATAPI_CFG_REMOV ?
		    T_REMOV : T_FIXED;
		strnvisx(model, sizeof(model), id->atap_model, 40,
		    VIS_TRIM|VIS_SAFE|VIS_OCTAL);
		strnvisx(serial_number, sizeof(serial_number), id->atap_serial,
		    20, VIS_TRIM|VIS_SAFE|VIS_OCTAL);
		strnvisx(firmware_revision, sizeof(firmware_revision),
		    id->atap_revision, 8, VIS_TRIM|VIS_SAFE|VIS_OCTAL);
		sa.sa_inqbuf.vendor = model;
		sa.sa_inqbuf.product = serial_number;
		sa.sa_inqbuf.revision = firmware_revision;

		/*
		 * Determine the operating mode capabilities of the device.
		 */
		if ((id->atap_config & ATAPI_CFG_CMD_MASK) == ATAPI_CFG_CMD_16)
			periph->periph_cap |= PERIPH_CAP_CMD16;
		/* XXX This is gross. */
		periph->periph_cap |= (id->atap_config & ATAPI_CFG_DRQ_MASK);

		drvp->drv_softc = atapi_probe_device(sc, target, periph, &sa);

		if (drvp->drv_softc)
			ata_probe_caps(drvp);
		else {
			/* Attach failed: forget the drive. */
			s = splbio();
			drvp->drive_type = ATA_DRIVET_NONE;
			splx(s);
		}
	} else {
		DPRINTF(("%s:%d: mvsata_atapi_probe_device:"
		    " ATAPI_IDENTIFY_DEVICE failed for drive %d: error 0x%x\n",
		    device_xname(atac->atac_dev), chp->ch_channel, target,
		    chp->ch_error));
		s = splbio();
		drvp->drive_type = ATA_DRIVET_NONE;
		splx(s);
	}
}
928 
929 /*
930  * Kill off all pending xfers for a periph.
931  *
932  * Must be called at splbio().
933  */
934 static void
935 mvsata_atapi_kill_pending(struct scsipi_periph *periph)
936 {
937 	struct atac_softc *atac =
938 	    device_private(periph->periph_channel->chan_adapter->adapt_dev);
939 	struct ata_channel *chp =
940 	    atac->atac_channels[periph->periph_channel->chan_channel];
941 
942 	ata_kill_pending(&chp->ch_drive[periph->periph_target]);
943 }
944 #endif	/* NATAPIBUS > 0 */
945 #endif	/* MVSATA_WITHOUTDMA */
946 
947 
948 /*
949  * mvsata_setup_channel()
950  *   Setup EDMA registers and prepare/purge DMA resources.
951  *   We assuming already stopped the EDMA.
952  */
static void
mvsata_setup_channel(struct ata_channel *chp)
{
#if !defined(MVSATA_WITHOUTDMA) || defined(MVSATA_DEBUG)
	struct mvsata_port *mvport = (struct mvsata_port *)chp;
#endif
	struct ata_drive_datas *drvp;
	uint32_t edma_mode;
	int drive, s;
#ifndef MVSATA_WITHOUTDMA
	int i;
	/* Per-port DMA resource sizes: one slot per EDMA queue entry. */
	const int crqb_size = sizeof(union mvsata_crqb) * MVSATA_EDMAQ_LEN;
	const int crpb_size = sizeof(struct crpb) * MVSATA_EDMAQ_LEN;
	const int eprd_buf_size = MVSATA_EPRD_MAX_SIZE * MVSATA_EDMAQ_LEN;
#endif

	DPRINTF(("%s:%d: mvsata_setup_channel: ",
	    device_xname(MVSATA_DEV2(mvport)), chp->ch_channel));

	/*
	 * Scan the drives on this channel to decide whether EDMA should be
	 * used: it is enabled only if some ATA (non-ATAPI) drive is in a
	 * DMA or UDMA mode.  A drive in UDMA mode has its plain-DMA flag
	 * cleared so the two are mutually exclusive.
	 */
	edma_mode = nodma;
	for (drive = 0; drive < chp->ch_ndrives; drive++) {
		drvp = &chp->ch_drive[drive];

		/* If no drive, skip */
		if (drvp->drive_type == ATA_DRIVET_NONE)
			continue;

		if (drvp->drive_flags & ATA_DRIVE_UDMA) {
			/* use Ultra/DMA */
			s = splbio();
			drvp->drive_flags &= ~ATA_DRIVE_DMA;
			splx(s);
		}

		if (drvp->drive_flags & (ATA_DRIVE_UDMA | ATA_DRIVE_DMA))
			if (drvp->drive_type == ATA_DRIVET_ATA)
				edma_mode = dma;
	}

	DPRINTF(("EDMA %sactive mode\n", (edma_mode == nodma) ? "not " : ""));

#ifndef MVSATA_WITHOUTDMA
	if (edma_mode == nodma) {
no_edma:
		/*
		 * EDMA not usable: release any previously allocated EDMA
		 * rings and PRD buffer.  Also the jump target when the
		 * allocations below fail.
		 */
		if (mvport->port_crqb != NULL)
			mvsata_edma_resource_purge(mvport, mvport->port_dmat,
			    mvport->port_crqb_dmamap, mvport->port_crqb);
		if (mvport->port_crpb != NULL)
			mvsata_edma_resource_purge(mvport, mvport->port_dmat,
			    mvport->port_crpb_dmamap, mvport->port_crpb);
		if (mvport->port_eprd != NULL)
			mvsata_edma_resource_purge(mvport, mvport->port_dmat,
			    mvport->port_eprd_dmamap, mvport->port_eprd);

		return;
	}

	/*
	 * Allocate the EDMA request ring (CRQB), response ring (CRPB) and
	 * PRD table buffer, unless a previous call already did so.
	 */
	if (mvport->port_crqb == NULL)
		mvport->port_crqb = mvsata_edma_resource_prepare(mvport,
		    mvport->port_dmat, &mvport->port_crqb_dmamap, crqb_size, 1);
	if (mvport->port_crpb == NULL)
		mvport->port_crpb = mvsata_edma_resource_prepare(mvport,
		    mvport->port_dmat, &mvport->port_crpb_dmamap, crpb_size, 0);
	if (mvport->port_eprd == NULL) {
		mvport->port_eprd = mvsata_edma_resource_prepare(mvport,
		    mvport->port_dmat, &mvport->port_eprd_dmamap, eprd_buf_size,
		    1);
		/* Carve the PRD buffer into one fixed-size slot per queue entry. */
		for (i = 0; i < MVSATA_EDMAQ_LEN; i++) {
			mvport->port_reqtbl[i].eprd_offset =
			    i * MVSATA_EPRD_MAX_SIZE;
			mvport->port_reqtbl[i].eprd = mvport->port_eprd +
			    i * MVSATA_EPRD_MAX_SIZE / sizeof(struct eprd);
		}
	}

	/*
	 * If any allocation failed, fall back to PIO: strip the DMA flags
	 * from every drive and purge whatever was allocated.
	 */
	if (mvport->port_crqb == NULL || mvport->port_crpb == NULL ||
	    mvport->port_eprd == NULL) {
		aprint_error_dev(MVSATA_DEV2(mvport),
		    "channel %d: can't use EDMA\n", chp->ch_channel);
		s = splbio();
		for (drive = 0; drive < chp->ch_ndrives; drive++) {
			drvp = &chp->ch_drive[drive];

			/* If no drive, skip */
			if (drvp->drive_type == ATA_DRIVET_NONE)
				continue;

			drvp->drive_flags &= ~(ATA_DRIVE_UDMA | ATA_DRIVE_DMA);
		}
		splx(s);
		goto no_edma;
	}

	mvsata_edma_config(mvport, edma_mode);
	mvsata_edma_reset_qptr(mvport);
	mvsata_edma_enable(mvport);
#endif
}
1051 
1052 #ifndef MVSATA_WITHOUTDMA
/*
 * Start (or continue) a block-I/O transfer on the channel.  Tries EDMA
 * first for C_DMA transfers, falling back to PIO on EINVAL; PIO transfers
 * are issued via the shadow task-file registers.  For polled transfers,
 * loops via the "again" label until the whole request is done.
 */
static void
mvsata_bio_start(struct ata_channel *chp, struct ata_xfer *xfer)
{
	struct mvsata_port *mvport = (struct mvsata_port *)chp;
	struct mvsata_softc *sc = device_private(MVSATA_DEV2(mvport));
	struct atac_softc *atac = chp->ch_atac;
	struct wdc_softc *wdc = CHAN_TO_WDC(chp);
	struct ata_bio *ata_bio = xfer->c_cmd;
	struct ata_drive_datas *drvp = &chp->ch_drive[xfer->c_drive];
	int wait_flags = (xfer->c_flags & C_POLL) ? AT_POLL : 0;
	u_int16_t cyl;
	u_int8_t head, sect, cmd = 0;
	int nblks, error;

	DPRINTFN(2, ("%s:%d: mvsata_bio_start: drive=%d\n",
	    device_xname(atac->atac_dev), chp->ch_channel, xfer->c_drive));

	/* Account DMA transfers (saturating at NXFER). */
	if (xfer->c_flags & C_DMA)
		if (drvp->n_xfers <= NXFER)
			drvp->n_xfers++;

again:
	/*
	 *
	 * When starting a multi-sector transfer, or doing single-sector
	 * transfers...
	 */
	if (xfer->c_skip == 0 || (ata_bio->flags & ATA_SINGLE) != 0) {
		if (ata_bio->flags & ATA_SINGLE)
			nblks = 1;
		else
			nblks = xfer->c_bcount / ata_bio->lp->d_secsize;
		/* Check for bad sectors and adjust transfer, if necessary. */
		if ((ata_bio->lp->d_flags & D_BADSECT) != 0) {
			long blkdiff;
			int i;

			/* badsect[] is terminated by -1. */
			for (i = 0; (blkdiff = ata_bio->badsect[i]) != -1;
			    i++) {
				blkdiff -= ata_bio->blkno;
				if (blkdiff < 0)
					continue;
				if (blkdiff == 0)
					/* Replace current block of transfer. */
					ata_bio->blkno =
					    ata_bio->lp->d_secperunit -
					    ata_bio->lp->d_nsectors - i - 1;
				if (blkdiff < nblks) {
					/* Bad block inside transfer. */
					ata_bio->flags |= ATA_SINGLE;
					nblks = 1;
				}
				break;
			}
			/* Transfer is okay now. */
		}
		if (xfer->c_flags & C_DMA) {
			/* EDMA path: hand the whole chunk to the hardware. */
			ata_bio->nblks = nblks;
			ata_bio->nbytes = xfer->c_bcount;

			/* For polled DMA, mask port interrupts while we spin. */
			if (xfer->c_flags & C_POLL)
				sc->sc_enable_intr(mvport, 0 /*off*/);
			error = mvsata_edma_enqueue(mvport, ata_bio,
			    (char *)xfer->c_databuf + xfer->c_skip);
			if (error) {
				if (error == EINVAL) {
					/*
					 * We can't do DMA on this transfer
					 * for some reason.  Fall back to
					 * PIO.
					 */
					xfer->c_flags &= ~C_DMA;
					error = 0;
					goto do_pio;
				}
				if (error == EBUSY) {
					aprint_error_dev(atac->atac_dev,
					    "channel %d: EDMA Queue full\n",
					    chp->ch_channel);
					/*
					 * XXXX: Perhaps, after it waits for
					 * a while, it is necessary to call
					 * bio_start again.
					 */
				}
				ata_bio->error = ERR_DMA;
				ata_bio->r_error = 0;
				mvsata_bio_done(chp, xfer);
				return;
			}
			chp->ch_flags |= ATACH_DMA_WAIT;
			/* start timeout machinery */
			if ((xfer->c_flags & C_POLL) == 0)
				callout_reset(&chp->ch_callout,
				    ATA_DELAY / 1000 * hz,
				    mvsata_edma_timeout, xfer);
			/* wait for irq */
			goto intr;
		} /* else not DMA */
do_pio:
		/*
		 * PIO path: compute the task-file address fields.  LBA48
		 * addressing passes the block number via wdccommandext()
		 * instead, so the CHS fields stay zero.
		 */
		if (ata_bio->flags & ATA_LBA48) {
			sect = 0;
			cyl =  0;
			head = 0;
		} else if (ata_bio->flags & ATA_LBA) {
			sect = (ata_bio->blkno >> 0) & 0xff;
			cyl = (ata_bio->blkno >> 8) & 0xffff;
			head = (ata_bio->blkno >> 24) & 0x0f;
			head |= WDSD_LBA;
		} else {
			int blkno = ata_bio->blkno;
			sect = blkno % ata_bio->lp->d_nsectors;
			sect++;	/* Sectors begin with 1, not 0. */
			blkno /= ata_bio->lp->d_nsectors;
			head = blkno % ata_bio->lp->d_ntracks;
			blkno /= ata_bio->lp->d_ntracks;
			cyl = blkno;
			head |= WDSD_CHS;
		}
		/* Clamp this PIO chunk to the drive's multi-sector count. */
		ata_bio->nblks = min(nblks, ata_bio->multi);
		ata_bio->nbytes = ata_bio->nblks * ata_bio->lp->d_secsize;
		KASSERT(nblks == 1 || (ata_bio->flags & ATA_SINGLE) == 0);
		if (ata_bio->nblks > 1)
			cmd = (ata_bio->flags & ATA_READ) ?
			    WDCC_READMULTI : WDCC_WRITEMULTI;
		else
			cmd = (ata_bio->flags & ATA_READ) ?
			    WDCC_READ : WDCC_WRITE;

		/* EDMA disable, if enabled this channel. */
		if (mvport->port_edmamode != nodma)
			mvsata_edma_disable(mvport, 10 /* ms */, wait_flags);

		mvsata_pmp_select(mvport, xfer->c_drive);

		/* Do control operations specially. */
		if (__predict_false(drvp->state < READY)) {
			/*
			 * Actually, we want to be careful not to mess with
			 * the control state if the device is currently busy,
			 * but we can assume that we never get to this point
			 * if that's the case.
			 */
			/*
			 * If it's not a polled command, we need the kernel
			 * thread
			 */
			if ((xfer->c_flags & C_POLL) == 0 && cpu_intr_p()) {
				chp->ch_queue->queue_freeze++;
				wakeup(&chp->ch_thread);
				return;
			}
			if (mvsata_bio_ready(mvport, ata_bio, xfer->c_drive,
			    (xfer->c_flags & C_POLL) ? AT_POLL : 0) != 0) {
				mvsata_bio_done(chp, xfer);
				return;
			}
		}

		/* Initiate command! */
		MVSATA_WDC_WRITE_1(mvport, SRB_H, WDSD_IBM);
		switch(wdc_wait_for_ready(chp, ATA_DELAY, wait_flags)) {
		case WDCWAIT_OK:
			break;
		case WDCWAIT_TOUT:
			goto timeout;
		case WDCWAIT_THR:
			/* Handed off to the channel thread; it will retry. */
			return;
		}
		if (ata_bio->flags & ATA_LBA48)
			wdccommandext(chp, 0, atacmd_to48(cmd),
			    ata_bio->blkno, nblks, 0, WDSD_LBA);
		else
			wdccommand(chp, 0, cmd, cyl,
			    head, sect, nblks,
			    (ata_bio->lp->d_type == DKTYPE_ST506) ?
			    ata_bio->lp->d_precompcyl / 4 : 0);

		/* start timeout machinery */
		if ((xfer->c_flags & C_POLL) == 0)
			callout_reset(&chp->ch_callout,
			    ATA_DELAY / 1000 * hz, wdctimeout, chp);
	} else if (ata_bio->nblks > 1) {
		/* The number of blocks in the last stretch may be smaller. */
		nblks = xfer->c_bcount / ata_bio->lp->d_secsize;
		if (ata_bio->nblks > nblks) {
			ata_bio->nblks = nblks;
			ata_bio->nbytes = xfer->c_bcount;
		}
	}
	/* If this was a write and not using DMA, push the data. */
	if ((ata_bio->flags & ATA_READ) == 0) {
		/*
		 * we have to busy-wait here, we can't rely on running in
		 * thread context.
		 */
		if (wdc_wait_for_drq(chp, ATA_DELAY, AT_POLL) != 0) {
			aprint_error_dev(atac->atac_dev,
			    "channel %d: drive %d timeout waiting for DRQ,"
			    " st=0x%02x, err=0x%02x\n",
			    chp->ch_channel, xfer->c_drive, chp->ch_status,
			    chp->ch_error);
			ata_bio->error = TIMEOUT;
			mvsata_bio_done(chp, xfer);
			return;
		}
		if (chp->ch_status & WDCS_ERR) {
			ata_bio->error = ERROR;
			ata_bio->r_error = chp->ch_error;
			mvsata_bio_done(chp, xfer);
			return;
		}

		wdc->dataout_pio(chp, drvp->drive_flags,
		    (char *)xfer->c_databuf + xfer->c_skip, ata_bio->nbytes);
	}

intr:
	/* Wait for IRQ (either real or polled) */
	if ((ata_bio->flags & ATA_POLL) == 0) {
		chp->ch_flags |= ATACH_IRQ_WAIT;
	} else {
		/* Wait for at last 400ns for status bit to be valid */
		delay(1);
		if (chp->ch_flags & ATACH_DMA_WAIT) {
			/* Polled DMA: spin for completion, re-enable intrs. */
			mvsata_edma_wait(mvport, xfer, ATA_DELAY);
			sc->sc_enable_intr(mvport, 1 /*on*/);
			chp->ch_flags &= ~ATACH_DMA_WAIT;
		}
		mvsata_bio_intr(chp, xfer, 0);
		/* Loop until the polled transfer is fully complete. */
		if ((ata_bio->flags & ATA_ITSDONE) == 0)
			goto again;
	}
	return;

timeout:
	aprint_error_dev(atac->atac_dev,
	    "channel %d: drive %d not ready, st=0x%02x, err=0x%02x\n",
	    chp->ch_channel, xfer->c_drive, chp->ch_status, chp->ch_error);
	ata_bio->error = TIMEOUT;
	mvsata_bio_done(chp, xfer);
	return;
}
1296 
1297 static int
1298 mvsata_bio_intr(struct ata_channel *chp, struct ata_xfer *xfer, int irq)
1299 {
1300 	struct atac_softc *atac = chp->ch_atac;
1301 	struct wdc_softc *wdc = CHAN_TO_WDC(chp);
1302 	struct ata_bio *ata_bio = xfer->c_cmd;
1303 	struct ata_drive_datas *drvp = &chp->ch_drive[xfer->c_drive];
1304 
1305 	DPRINTFN(2, ("%s:%d: mvsata_bio_intr: drive=%d\n",
1306 	    device_xname(atac->atac_dev), chp->ch_channel, xfer->c_drive));
1307 
1308 	chp->ch_flags &= ~(ATACH_IRQ_WAIT|ATACH_DMA_WAIT);
1309 
1310 	/*
1311 	 * If we missed an interrupt transfer, reset and restart.
1312 	 * Don't try to continue transfer, we may have missed cycles.
1313 	 */
1314 	if (xfer->c_flags & C_TIMEOU) {
1315 		ata_bio->error = TIMEOUT;
1316 		mvsata_bio_done(chp, xfer);
1317 		return 1;
1318 	}
1319 
1320 	/* Is it not a transfer, but a control operation? */
1321 	if (!(xfer->c_flags & C_DMA) && drvp->state < READY) {
1322 		aprint_error_dev(atac->atac_dev,
1323 		    "channel %d: drive %d bad state %d in mvsata_bio_intr\n",
1324 		    chp->ch_channel, xfer->c_drive, drvp->state);
1325 		panic("mvsata_bio_intr: bad state");
1326 	}
1327 
1328 	/* Ack interrupt done by wdc_wait_for_unbusy */
1329 	if (!(xfer->c_flags & C_DMA) &&
1330 	    (wdc_wait_for_unbusy(chp, (irq == 0) ? ATA_DELAY : 0, AT_POLL)
1331 							== WDCWAIT_TOUT)) {
1332 		if (irq && (xfer->c_flags & C_TIMEOU) == 0)
1333 			return 0;	/* IRQ was not for us */
1334 		aprint_error_dev(atac->atac_dev,
1335 		    "channel %d: drive %d timeout, c_bcount=%d, c_skip%d\n",
1336 		    chp->ch_channel, xfer->c_drive, xfer->c_bcount,
1337 		    xfer->c_skip);
1338 		ata_bio->error = TIMEOUT;
1339 		mvsata_bio_done(chp, xfer);
1340 		return 1;
1341 	}
1342 
1343 	if (xfer->c_flags & C_DMA) {
1344 		if (ata_bio->error == NOERROR)
1345 			goto end;
1346 		if (ata_bio->error == ERR_DMA)
1347 			ata_dmaerr(drvp,
1348 			    (xfer->c_flags & C_POLL) ? AT_POLL : 0);
1349 	}
1350 
1351 	/* if we had an error, end */
1352 	if (ata_bio->error != NOERROR) {
1353 		mvsata_bio_done(chp, xfer);
1354 		return 1;
1355 	}
1356 
1357 	/* If this was a read and not using DMA, fetch the data. */
1358 	if ((ata_bio->flags & ATA_READ) != 0) {
1359 		if ((chp->ch_status & WDCS_DRQ) != WDCS_DRQ) {
1360 			aprint_error_dev(atac->atac_dev,
1361 			    "channel %d: drive %d read intr before drq\n",
1362 			    chp->ch_channel, xfer->c_drive);
1363 			ata_bio->error = TIMEOUT;
1364 			mvsata_bio_done(chp, xfer);
1365 			return 1;
1366 		}
1367 		wdc->datain_pio(chp, drvp->drive_flags,
1368 		    (char *)xfer->c_databuf + xfer->c_skip, ata_bio->nbytes);
1369 	}
1370 
1371 end:
1372 	ata_bio->blkno += ata_bio->nblks;
1373 	ata_bio->blkdone += ata_bio->nblks;
1374 	xfer->c_skip += ata_bio->nbytes;
1375 	xfer->c_bcount -= ata_bio->nbytes;
1376 	/* See if this transfer is complete. */
1377 	if (xfer->c_bcount > 0) {
1378 		if ((ata_bio->flags & ATA_POLL) == 0)
1379 			/* Start the next operation */
1380 			mvsata_bio_start(chp, xfer);
1381 		else
1382 			/* Let mvsata_bio_start do the loop */
1383 			return 1;
1384 	} else { /* Done with this transfer */
1385 		ata_bio->error = NOERROR;
1386 		mvsata_bio_done(chp, xfer);
1387 	}
1388 	return 1;
1389 }
1390 
1391 static void
1392 mvsata_bio_kill_xfer(struct ata_channel *chp, struct ata_xfer *xfer, int reason)
1393 {
1394 	struct mvsata_port *mvport = (struct mvsata_port *)chp;
1395 	struct atac_softc *atac = chp->ch_atac;
1396 	struct ata_bio *ata_bio = xfer->c_cmd;
1397 	int drive = xfer->c_drive;
1398 
1399 	DPRINTFN(2, ("%s:%d: mvsata_bio_kill_xfer: drive=%d\n",
1400 	    device_xname(atac->atac_dev), chp->ch_channel, xfer->c_drive));
1401 
1402 	/* EDMA restart, if enabled */
1403 	if (!(xfer->c_flags & C_DMA) && mvport->port_edmamode != nodma) {
1404 		mvsata_edma_reset_qptr(mvport);
1405 		mvsata_edma_enable(mvport);
1406 	}
1407 
1408 	ata_free_xfer(chp, xfer);
1409 
1410 	ata_bio->flags |= ATA_ITSDONE;
1411 	switch (reason) {
1412 	case KILL_GONE:
1413 		ata_bio->error = ERR_NODEV;
1414 		break;
1415 	case KILL_RESET:
1416 		ata_bio->error = ERR_RESET;
1417 		break;
1418 	default:
1419 		aprint_error_dev(atac->atac_dev,
1420 		    "mvsata_bio_kill_xfer: unknown reason %d\n", reason);
1421 		panic("mvsata_bio_kill_xfer");
1422 	}
1423 	ata_bio->r_error = WDCE_ABRT;
1424 	(*chp->ch_drive[drive].drv_done)(chp->ch_drive[drive].drv_softc);
1425 }
1426 
/*
 * Complete a block-I/O transfer: stop the watchdog, restart EDMA if this
 * was a PIO xfer on an EDMA channel, release the xfer and notify the
 * drive's completion callback, then start the next queued transfer.
 */
static void
mvsata_bio_done(struct ata_channel *chp, struct ata_xfer *xfer)
{
	struct mvsata_port *mvport = (struct mvsata_port *)chp;
	struct ata_bio *ata_bio = xfer->c_cmd;
	int drive = xfer->c_drive;

	DPRINTFN(2, ("%s:%d: mvsata_bio_done: drive=%d, flags=0x%x\n",
	    device_xname(MVSATA_DEV2(mvport)), chp->ch_channel, xfer->c_drive,
	    (u_int)xfer->c_flags));

	callout_stop(&chp->ch_callout);

	/* EDMA restart, if enabled */
	if (!(xfer->c_flags & C_DMA) && mvport->port_edmamode != nodma) {
		mvsata_edma_reset_qptr(mvport);
		mvsata_edma_enable(mvport);
	}

	/* feed back residual bcount to our caller */
	ata_bio->bcount = xfer->c_bcount;

	/* mark controller inactive and free xfer */
	KASSERT(chp->ch_queue->active_xfer != NULL);
	chp->ch_queue->active_xfer = NULL;
	ata_free_xfer(chp, xfer);

	/* A drain is pending on this drive: report the device as gone. */
	if (chp->ch_drive[drive].drive_flags & ATA_DRIVE_WAITDRAIN) {
		ata_bio->error = ERR_NODEV;
		chp->ch_drive[drive].drive_flags &= ~ATA_DRIVE_WAITDRAIN;
		wakeup(&chp->ch_queue->active_xfer);
	}
	ata_bio->flags |= ATA_ITSDONE;
	(*chp->ch_drive[drive].drv_done)(chp->ch_drive[drive].drv_softc);
	atastart(chp);
}
1463 
/*
 * Bring a drive to the READY state before a bio transfer: recalibrate,
 * program the transfer mode (PIO, then DMA/UDMA via SET FEATURES),
 * set CHS geometry for non-LBA drives, and configure multi-sector mode.
 * All steps are polled with interrupts disabled on the port.
 * Returns 0 on success, -1 on timeout or drive error (ata_bio->error set).
 */
static int
mvsata_bio_ready(struct mvsata_port *mvport, struct ata_bio *ata_bio, int drive,
		 int flags)
{
	struct ata_channel *chp = &mvport->port_ata_channel;
	struct atac_softc *atac = chp->ch_atac;
	struct ata_drive_datas *drvp = &chp->ch_drive[drive];
	const char *errstring;	/* names the step that failed, for messages */

	flags |= AT_POLL;	/* XXX */

	/*
	 * disable interrupts, all commands here should be quick
	 * enough to be able to poll, and we don't go here that often
	 */
	MVSATA_WDC_WRITE_1(mvport, SRB_CAS, WDCTL_4BIT | WDCTL_IDS);
	MVSATA_WDC_WRITE_1(mvport, SRB_H, WDSD_IBM);
	DELAY(10);
	errstring = "wait";
	if (wdcwait(chp, WDCS_DRDY, WDCS_DRDY, ATA_DELAY, flags))
		goto ctrltimeout;
	wdccommandshort(chp, 0, WDCC_RECAL);
	/* Wait for at least 400ns for status bit to be valid */
	DELAY(1);
	errstring = "recal";
	if (wdcwait(chp, WDCS_DRDY, WDCS_DRDY, ATA_DELAY, flags))
		goto ctrltimeout;
	if (chp->ch_status & (WDCS_ERR | WDCS_DWF))
		goto ctrlerror;
	/* Don't try to set modes if controller can't be adjusted */
	if (atac->atac_set_modes == NULL)
		goto geometry;
	/* Also don't try if the drive didn't report its mode */
	if ((drvp->drive_flags & ATA_DRIVE_MODE) == 0)
		goto geometry;
	/* SET FEATURES: transfer mode 0x08|n selects PIO mode n. */
	wdccommand(chp, 0, SET_FEATURES, 0, 0, 0,
	    0x08 | drvp->PIO_mode, WDSF_SET_MODE);
	errstring = "piomode";
	if (wdcwait(chp, WDCS_DRDY, WDCS_DRDY, ATA_DELAY, flags))
		goto ctrltimeout;
	if (chp->ch_status & (WDCS_ERR | WDCS_DWF))
		goto ctrlerror;
	/* 0x40|n = Ultra DMA mode n, 0x20|n = multiword DMA mode n. */
	if (drvp->drive_flags & ATA_DRIVE_UDMA)
		wdccommand(chp, 0, SET_FEATURES, 0, 0, 0,
		    0x40 | drvp->UDMA_mode, WDSF_SET_MODE);
	else if (drvp->drive_flags & ATA_DRIVE_DMA)
		wdccommand(chp, 0, SET_FEATURES, 0, 0, 0,
		    0x20 | drvp->DMA_mode, WDSF_SET_MODE);
	else
		goto geometry;
	errstring = "dmamode";
	if (wdcwait(chp, WDCS_DRDY, WDCS_DRDY, ATA_DELAY, flags))
		goto ctrltimeout;
	if (chp->ch_status & (WDCS_ERR | WDCS_DWF))
		goto ctrlerror;
geometry:
	/* LBA drives need no CHS geometry programming. */
	if (ata_bio->flags & ATA_LBA)
		goto multimode;
	wdccommand(chp, 0, WDCC_IDP, ata_bio->lp->d_ncylinders,
	    ata_bio->lp->d_ntracks - 1, 0, ata_bio->lp->d_nsectors,
	    (ata_bio->lp->d_type == DKTYPE_ST506) ?
	    ata_bio->lp->d_precompcyl / 4 : 0);
	errstring = "geometry";
	if (wdcwait(chp, WDCS_DRDY, WDCS_DRDY, ATA_DELAY, flags))
		goto ctrltimeout;
	if (chp->ch_status & (WDCS_ERR | WDCS_DWF))
		goto ctrlerror;
multimode:
	if (ata_bio->multi == 1)
		goto ready;
	wdccommand(chp, 0, WDCC_SETMULTI, 0, 0, 0, ata_bio->multi, 0);
	errstring = "setmulti";
	if (wdcwait(chp, WDCS_DRDY, WDCS_DRDY, ATA_DELAY, flags))
		goto ctrltimeout;
	if (chp->ch_status & (WDCS_ERR | WDCS_DWF))
		goto ctrlerror;
ready:
	drvp->state = READY;
	/*
	 * The drive is usable now
	 */
	MVSATA_WDC_WRITE_1(mvport, SRB_CAS, WDCTL_4BIT);
	delay(10);	/* some drives need a little delay here */
	return 0;

ctrltimeout:
	aprint_error_dev(atac->atac_dev, "channel %d: drive %d %s timed out\n",
	    chp->ch_channel, drive, errstring);
	ata_bio->error = TIMEOUT;
	goto ctrldone;
ctrlerror:
	aprint_error_dev(atac->atac_dev, "channel %d: drive %d %s ",
	    chp->ch_channel, drive, errstring);
	if (chp->ch_status & WDCS_DWF) {
		aprint_error("drive fault\n");
		ata_bio->error = ERR_DF;
	} else {
		aprint_error("error (%x)\n", chp->ch_error);
		ata_bio->r_error = chp->ch_error;
		ata_bio->error = ERROR;
	}
ctrldone:
	/* Reset state so the next attempt redoes the full setup. */
	drvp->state = 0;
	MVSATA_WDC_WRITE_1(mvport, SRB_CAS, WDCTL_4BIT);
	return -1;
}
1570 
/*
 * Issue a plain ATA command (wdc_exec_command path) to the drive via the
 * shadow registers.  EDMA is disabled first since commands here go through
 * the task file.  Polled commands are completed synchronously via
 * mvsata_wdc_cmd_intr(); otherwise a watchdog callout is armed and
 * completion happens from interrupt context.
 */
static void
mvsata_wdc_cmd_start(struct ata_channel *chp, struct ata_xfer *xfer)
{
	struct mvsata_port *mvport = (struct mvsata_port *)chp;
	int drive = xfer->c_drive;
	int wait_flags = (xfer->c_flags & C_POLL) ? AT_POLL : 0;
	struct ata_command *ata_c = xfer->c_cmd;

	DPRINTFN(1, ("%s:%d: mvsata_cmd_start: drive=%d\n",
	    device_xname(MVSATA_DEV2(mvport)), chp->ch_channel, drive));

	/* First, EDMA disable, if enabled this channel. */
	if (mvport->port_edmamode != nodma)
		mvsata_edma_disable(mvport, 10 /* ms */, wait_flags);

	mvsata_pmp_select(mvport, drive);

	MVSATA_WDC_WRITE_1(mvport, SRB_H, WDSD_IBM);
	/* Wait for the drive to be ready to accept a command. */
	switch(wdcwait(chp, ata_c->r_st_bmask | WDCS_DRQ,
	    ata_c->r_st_bmask, ata_c->timeout, wait_flags)) {
	case WDCWAIT_OK:
		break;
	case WDCWAIT_TOUT:
		ata_c->flags |= AT_TIMEOU;
		mvsata_wdc_cmd_done(chp, xfer);
		return;
	case WDCWAIT_THR:
		/* Handed off to the channel thread; it will retry. */
		return;
	}
	if (ata_c->flags & AT_POLL)
		/* polled command, disable interrupts */
		MVSATA_WDC_WRITE_1(mvport, SRB_CAS, WDCTL_4BIT | WDCTL_IDS);
	if ((ata_c->flags & AT_LBA48) != 0) {
		wdccommandext(chp, 0, ata_c->r_command,
		    ata_c->r_lba, ata_c->r_count, ata_c->r_features,
		    ata_c->r_device & ~0x10);
	} else {
		wdccommand(chp, 0, ata_c->r_command,
		    (ata_c->r_lba >> 8) & 0xffff,
		    (((ata_c->flags & AT_LBA) != 0) ? WDSD_LBA : 0) |
		    ((ata_c->r_lba >> 24) & 0x0f),
		    ata_c->r_lba & 0xff,
		    ata_c->r_count & 0xff,
		    ata_c->r_features & 0xff);
	}

	if ((ata_c->flags & AT_POLL) == 0) {
		chp->ch_flags |= ATACH_IRQ_WAIT; /* wait for interrupt */
		callout_reset(&chp->ch_callout, ata_c->timeout / 1000 * hz,
		    wdctimeout, chp);
		return;
	}
	/*
	 * Polled command. Wait for drive ready or drq. Done in intr().
	 * Wait for at last 400ns for status bit to be valid.
	 */
	delay(10);	/* 400ns delay */
	mvsata_wdc_cmd_intr(chp, xfer, 0);
}
1630 
/*
 * Interrupt/poll handler for a plain ATA command.  Handles the PIO data
 * phases (datain/dataout) and final status.  irq != 0 means we came from
 * the hardware interrupt path and may return 0 for "not our interrupt";
 * returns 1 when the command was advanced or completed.
 */
static int
mvsata_wdc_cmd_intr(struct ata_channel *chp, struct ata_xfer *xfer, int irq)
{
	struct mvsata_port *mvport = (struct mvsata_port *)chp;
	struct wdc_softc *wdc = CHAN_TO_WDC(chp);
	struct ata_command *ata_c = xfer->c_cmd;
	int bcount = ata_c->bcount;
	char *data = ata_c->data;
	int wflags;
	int drive_flags;

	if (ata_c->r_command == WDCC_IDENTIFY ||
	    ata_c->r_command == ATAPI_IDENTIFY_DEVICE)
		/*
		 * The IDENTIFY data has been designed as an array of
		 * u_int16_t, so we can byteswap it on the fly.
		 * Historically it's what we have always done so keeping it
		 * here ensure binary backward compatibility.
		 */
		drive_flags = ATA_DRIVE_NOSTREAM |
		    chp->ch_drive[xfer->c_drive].drive_flags;
	else
		/*
		 * Other data structure are opaque and should be transfered
		 * as is.
		 */
		drive_flags = chp->ch_drive[xfer->c_drive].drive_flags;

	if ((ata_c->flags & (AT_WAIT | AT_POLL)) == (AT_WAIT | AT_POLL))
		/* both wait and poll, we can tsleep here */
		wflags = AT_WAIT | AT_POLL;
	else
		wflags = AT_POLL;

again:
	DPRINTFN(1, ("%s:%d: mvsata_cmd_intr: drive=%d\n",
	    device_xname(MVSATA_DEV2(mvport)), chp->ch_channel, xfer->c_drive));

	/*
	 * after a ATAPI_SOFT_RESET, the device will have released the bus.
	 * Reselect again, it doesn't hurt for others commands, and the time
	 * penalty for the extra register write is acceptable,
	 * wdc_exec_command() isn't called often (mostly for autoconfig)
	 */
	if ((xfer->c_flags & C_ATAPI) != 0) {
		MVSATA_WDC_WRITE_1(mvport, SRB_H, WDSD_IBM);
	}
	if ((ata_c->flags & AT_XFDONE) != 0) {
		/*
		 * We have completed a data xfer. The drive should now be
		 * in its initial state
		 */
		if (wdcwait(chp, ata_c->r_st_bmask | WDCS_DRQ,
		    ata_c->r_st_bmask, (irq == 0)  ? ata_c->timeout : 0,
		    wflags) ==  WDCWAIT_TOUT) {
			if (irq && (xfer->c_flags & C_TIMEOU) == 0)
				return 0;	/* IRQ was not for us */
			ata_c->flags |= AT_TIMEOU;
		}
		goto out;
	}
	/* Wait for the post-command status the caller asked for. */
	if (wdcwait(chp, ata_c->r_st_pmask, ata_c->r_st_pmask,
	    (irq == 0)  ? ata_c->timeout : 0, wflags) == WDCWAIT_TOUT) {
		if (irq && (xfer->c_flags & C_TIMEOU) == 0)
		    return 0;	/* IRQ was not for us */
		ata_c->flags |= AT_TIMEOU;
		goto out;
	}
	delay(20);	/* XXXXX: Delay more times. */
	if (ata_c->flags & AT_READ) {
		/* DRQ must be asserted before we can pull data. */
		if ((chp->ch_status & WDCS_DRQ) == 0) {
			ata_c->flags |= AT_TIMEOU;
			goto out;
		}
		wdc->datain_pio(chp, drive_flags, data, bcount);
		/* at this point the drive should be in its initial state */
		ata_c->flags |= AT_XFDONE;
		/*
		 * XXX checking the status register again here cause some
		 * hardware to timeout.
		 */
	} else if (ata_c->flags & AT_WRITE) {
		if ((chp->ch_status & WDCS_DRQ) == 0) {
			ata_c->flags |= AT_TIMEOU;
			goto out;
		}
		wdc->dataout_pio(chp, drive_flags, data, bcount);
		ata_c->flags |= AT_XFDONE;
		/* Non-polled writes wait for the completion interrupt. */
		if ((ata_c->flags & AT_POLL) == 0) {
			chp->ch_flags |= ATACH_IRQ_WAIT; /* wait for intr */
			callout_reset(&chp->ch_callout,
			    mstohz(ata_c->timeout), wdctimeout, chp);
			return 1;
		} else
			goto again;
	}
out:
	mvsata_wdc_cmd_done(chp, xfer);
	return 1;
}
1731 
1732 static void
1733 mvsata_wdc_cmd_kill_xfer(struct ata_channel *chp, struct ata_xfer *xfer,
1734 			 int reason)
1735 {
1736 	struct mvsata_port *mvport = (struct mvsata_port *)chp;
1737 	struct ata_command *ata_c = xfer->c_cmd;
1738 
1739 	DPRINTFN(1, ("%s:%d: mvsata_cmd_kill_xfer: drive=%d\n",
1740 	    device_xname(MVSATA_DEV2(mvport)), chp->ch_channel, xfer->c_drive));
1741 
1742 	switch (reason) {
1743 	case KILL_GONE:
1744 		ata_c->flags |= AT_GONE;
1745 		break;
1746 	case KILL_RESET:
1747 		ata_c->flags |= AT_RESET;
1748 		break;
1749 	default:
1750 		aprint_error_dev(MVSATA_DEV2(mvport),
1751 		    "mvsata_cmd_kill_xfer: unknown reason %d\n", reason);
1752 		panic("mvsata_cmd_kill_xfer");
1753 	}
1754 	mvsata_wdc_cmd_done_end(chp, xfer);
1755 }
1756 
/*
 * Gather final status for an ATA command: latch error/fault bits, read
 * back the task-file registers if the caller requested AT_READREG (with
 * the HOB dance for LBA48 commands), stop the watchdog, and complete the
 * xfer (or divert to kill_xfer if a drain is pending on the drive).
 */
static void
mvsata_wdc_cmd_done(struct ata_channel *chp, struct ata_xfer *xfer)
{
	struct mvsata_port *mvport = (struct mvsata_port *)chp;
	struct atac_softc *atac = chp->ch_atac;
	struct ata_command *ata_c = xfer->c_cmd;

	DPRINTFN(1, ("%s:%d: mvsata_cmd_done: drive=%d, flags=0x%x\n",
	    device_xname(atac->atac_dev), chp->ch_channel, xfer->c_drive,
	    ata_c->flags));

	if (chp->ch_status & WDCS_DWF)
		ata_c->flags |= AT_DF;
	if (chp->ch_status & WDCS_ERR) {
		ata_c->flags |= AT_ERROR;
		ata_c->r_error = chp->ch_error;
	}
	/* Read back registers only on success and while device is alive. */
	if ((ata_c->flags & AT_READREG) != 0 &&
	    device_is_active(atac->atac_dev) &&
	    (ata_c->flags & (AT_ERROR | AT_DF)) == 0) {
		ata_c->r_status = MVSATA_WDC_READ_1(mvport, SRB_CS);
		ata_c->r_error = MVSATA_WDC_READ_1(mvport, SRB_FE);
		ata_c->r_count = MVSATA_WDC_READ_1(mvport, SRB_SC);
		ata_c->r_lba =
		    (uint64_t)MVSATA_WDC_READ_1(mvport, SRB_LBAL) << 0;
		ata_c->r_lba |=
		    (uint64_t)MVSATA_WDC_READ_1(mvport, SRB_LBAM) << 8;
		ata_c->r_lba |=
		    (uint64_t)MVSATA_WDC_READ_1(mvport, SRB_LBAH) << 16;
		ata_c->r_device = MVSATA_WDC_READ_1(mvport, SRB_H);
		if ((ata_c->flags & AT_LBA48) != 0) {
			/*
			 * Set HOB to read the high-order bytes of the
			 * 48-bit fields, preserving the interrupt-disable
			 * bit for polled commands.
			 */
			if ((ata_c->flags & AT_POLL) != 0) {
				MVSATA_WDC_WRITE_1(mvport, SRB_CAS,
				    WDCTL_HOB|WDCTL_4BIT|WDCTL_IDS);
			} else {
				MVSATA_WDC_WRITE_1(mvport, SRB_CAS,
				    WDCTL_HOB|WDCTL_4BIT);
			}
			ata_c->r_count |=
			    MVSATA_WDC_READ_1(mvport, SRB_SC) << 8;
			ata_c->r_lba |=
			    (uint64_t)MVSATA_WDC_READ_1(mvport, SRB_LBAL) << 24;
			ata_c->r_lba |=
			    (uint64_t)MVSATA_WDC_READ_1(mvport, SRB_LBAM) << 32;
			ata_c->r_lba |=
			    (uint64_t)MVSATA_WDC_READ_1(mvport, SRB_LBAH) << 40;
			/* Clear HOB again. */
			if ((ata_c->flags & AT_POLL) != 0) {
				MVSATA_WDC_WRITE_1(mvport, SRB_CAS,
				    WDCTL_4BIT|WDCTL_IDS);
			} else {
				MVSATA_WDC_WRITE_1(mvport, SRB_CAS,
				    WDCTL_4BIT);
			}
		} else {
			/* For 28-bit LBA, bits 27:24 live in the device reg. */
			ata_c->r_lba |=
			    (uint64_t)(ata_c->r_device & 0x0f) << 24;
		}
	}
	callout_stop(&chp->ch_callout);
	chp->ch_queue->active_xfer = NULL;
	if (ata_c->flags & AT_POLL) {
		/* enable interrupts */
		MVSATA_WDC_WRITE_1(mvport, SRB_CAS, WDCTL_4BIT);
		delay(10);	/* some drives need a little delay here */
	}
	/* A pending drain takes precedence over normal completion. */
	if (chp->ch_drive[xfer->c_drive].drive_flags & ATA_DRIVE_WAITDRAIN) {
		mvsata_wdc_cmd_kill_xfer(chp, xfer, KILL_GONE);
		chp->ch_drive[xfer->c_drive].drive_flags &= ~ATA_DRIVE_WAITDRAIN;
		wakeup(&chp->ch_queue->active_xfer);
	} else
		mvsata_wdc_cmd_done_end(chp, xfer);
}
1829 
1830 static void
1831 mvsata_wdc_cmd_done_end(struct ata_channel *chp, struct ata_xfer *xfer)
1832 {
1833 	struct mvsata_port *mvport = (struct mvsata_port *)chp;
1834 	struct ata_command *ata_c = xfer->c_cmd;
1835 
1836 	/* EDMA restart, if enabled */
1837 	if (mvport->port_edmamode != nodma) {
1838 		mvsata_edma_reset_qptr(mvport);
1839 		mvsata_edma_enable(mvport);
1840 	}
1841 
1842 	ata_c->flags |= AT_DONE;
1843 	ata_free_xfer(chp, xfer);
1844 	if (ata_c->flags & AT_WAIT)
1845 		wakeup(ata_c);
1846 	else if (ata_c->callback)
1847 		ata_c->callback(ata_c->callback_arg);
1848 	atastart(chp);
1849 
1850 	return;
1851 }
1852 
1853 #if NATAPIBUS > 0
1854 static void
1855 mvsata_atapi_start(struct ata_channel *chp, struct ata_xfer *xfer)
1856 {
1857 	struct mvsata_softc *sc = (struct mvsata_softc *)chp->ch_atac;
1858 	struct mvsata_port *mvport = (struct mvsata_port *)chp;
1859 	struct atac_softc *atac = &sc->sc_wdcdev.sc_atac;
1860 	struct scsipi_xfer *sc_xfer = xfer->c_cmd;
1861 	struct ata_drive_datas *drvp = &chp->ch_drive[xfer->c_drive];
1862 	const int wait_flags = (xfer->c_flags & C_POLL) ? AT_POLL : 0;
1863 	const char *errstring;
1864 
1865 	DPRINTFN(2, ("%s:%d:%d: mvsata_atapi_start: scsi flags 0x%x\n",
1866 	    device_xname(chp->ch_atac->atac_dev), chp->ch_channel,
1867 	    xfer->c_drive, sc_xfer->xs_control));
1868 
1869 	if (mvport->port_edmamode != nodma)
1870 		mvsata_edma_disable(mvport, 10 /* ms */, wait_flags);
1871 
1872 	mvsata_pmp_select(mvport, xfer->c_drive);
1873 
1874 	if ((xfer->c_flags & C_DMA) && (drvp->n_xfers <= NXFER))
1875 		drvp->n_xfers++;
1876 
1877 	/* Do control operations specially. */
1878 	if (__predict_false(drvp->state < READY)) {
1879 		/* If it's not a polled command, we need the kernel thread */
1880 		if ((sc_xfer->xs_control & XS_CTL_POLL) == 0 && cpu_intr_p()) {
1881 			chp->ch_queue->queue_freeze++;
1882 			wakeup(&chp->ch_thread);
1883 			return;
1884 		}
1885 		/*
1886 		 * disable interrupts, all commands here should be quick
1887 		 * enough to be able to poll, and we don't go here that often
1888 		 */
1889 		MVSATA_WDC_WRITE_1(mvport, SRB_CAS, WDCTL_4BIT | WDCTL_IDS);
1890 
1891 		MVSATA_WDC_WRITE_1(mvport, SRB_H, WDSD_IBM);
1892 		/* Don't try to set mode if controller can't be adjusted */
1893 		if (atac->atac_set_modes == NULL)
1894 			goto ready;
1895 		/* Also don't try if the drive didn't report its mode */
1896 		if ((drvp->drive_flags & ATA_DRIVE_MODE) == 0)
1897 			goto ready;
1898 		errstring = "unbusy";
1899 		if (wdc_wait_for_unbusy(chp, ATAPI_DELAY, wait_flags))
1900 			goto timeout;
1901 		wdccommand(chp, 0, SET_FEATURES, 0, 0, 0,
1902 		    0x08 | drvp->PIO_mode, WDSF_SET_MODE);
1903 		errstring = "piomode";
1904 		if (wdc_wait_for_unbusy(chp, ATAPI_MODE_DELAY, wait_flags))
1905 			goto timeout;
1906 		if (chp->ch_status & WDCS_ERR) {
1907 			if (chp->ch_error == WDCE_ABRT) {
1908 				/*
1909 				 * Some ATAPI drives reject PIO settings.
1910 				 * Fall back to PIO mode 3 since that's the
1911 				 * minimum for ATAPI.
1912 				 */
1913 				aprint_error_dev(atac->atac_dev,
1914 				    "channel %d drive %d: PIO mode %d rejected,"
1915 				    " falling back to PIO mode 3\n",
1916 				    chp->ch_channel, xfer->c_drive,
1917 				    drvp->PIO_mode);
1918 				if (drvp->PIO_mode > 3)
1919 					drvp->PIO_mode = 3;
1920 			} else
1921 				goto error;
1922 		}
1923 		if (drvp->drive_flags & ATA_DRIVE_UDMA)
1924 			wdccommand(chp, 0, SET_FEATURES, 0, 0, 0,
1925 			    0x40 | drvp->UDMA_mode, WDSF_SET_MODE);
1926 		else
1927 		if (drvp->drive_flags & ATA_DRIVE_DMA)
1928 			wdccommand(chp, 0, SET_FEATURES, 0, 0, 0,
1929 			    0x20 | drvp->DMA_mode, WDSF_SET_MODE);
1930 		else
1931 			goto ready;
1932 		errstring = "dmamode";
1933 		if (wdc_wait_for_unbusy(chp, ATAPI_MODE_DELAY, wait_flags))
1934 			goto timeout;
1935 		if (chp->ch_status & WDCS_ERR) {
1936 			if (chp->ch_error == WDCE_ABRT) {
1937 				if (drvp->drive_flags & ATA_DRIVE_UDMA)
1938 					goto error;
1939 				else {
1940 					/*
1941 					 * The drive rejected our DMA setting.
1942 					 * Fall back to mode 1.
1943 					 */
1944 					aprint_error_dev(atac->atac_dev,
1945 					    "channel %d drive %d:"
1946 					    " DMA mode %d rejected,"
1947 					    " falling back to DMA mode 0\n",
1948 					    chp->ch_channel, xfer->c_drive,
1949 					    drvp->DMA_mode);
1950 					if (drvp->DMA_mode > 0)
1951 						drvp->DMA_mode = 0;
1952 				}
1953 			} else
1954 				goto error;
1955 		}
1956 ready:
1957 		drvp->state = READY;
1958 		MVSATA_WDC_WRITE_1(mvport, SRB_CAS, WDCTL_4BIT);
1959 		delay(10); /* some drives need a little delay here */
1960 	}
1961 	/* start timeout machinery */
1962 	if ((sc_xfer->xs_control & XS_CTL_POLL) == 0)
1963 		callout_reset(&chp->ch_callout, mstohz(sc_xfer->timeout),
1964 		    wdctimeout, chp);
1965 
1966 	MVSATA_WDC_WRITE_1(mvport, SRB_H, WDSD_IBM);
1967 	switch (wdc_wait_for_unbusy(chp, ATAPI_DELAY, wait_flags)  < 0) {
1968 	case WDCWAIT_OK:
1969 		break;
1970 	case WDCWAIT_TOUT:
1971 		aprint_error_dev(atac->atac_dev, "not ready, st = %02x\n",
1972 		    chp->ch_status);
1973 		sc_xfer->error = XS_TIMEOUT;
1974 		mvsata_atapi_reset(chp, xfer);
1975 		return;
1976 	case WDCWAIT_THR:
1977 		return;
1978 	}
1979 
1980 	/*
1981 	 * Even with WDCS_ERR, the device should accept a command packet
1982 	 * Limit length to what can be stuffed into the cylinder register
1983 	 * (16 bits).  Some CD-ROMs seem to interpret '0' as 65536,
1984 	 * but not all devices do that and it's not obvious from the
1985 	 * ATAPI spec that that behaviour should be expected.  If more
1986 	 * data is necessary, multiple data transfer phases will be done.
1987 	 */
1988 
1989 	wdccommand(chp, 0, ATAPI_PKT_CMD,
1990 	    xfer->c_bcount <= 0xffff ? xfer->c_bcount : 0xffff, 0, 0, 0,
1991 	    (xfer->c_flags & C_DMA) ? ATAPI_PKT_CMD_FTRE_DMA : 0);
1992 
1993 	/*
1994 	 * If there is no interrupt for CMD input, busy-wait for it (done in
1995 	 * the interrupt routine. If it is a polled command, call the interrupt
1996 	 * routine until command is done.
1997 	 */
1998 	if ((sc_xfer->xs_periph->periph_cap & ATAPI_CFG_DRQ_MASK) !=
1999 	    ATAPI_CFG_IRQ_DRQ || (sc_xfer->xs_control & XS_CTL_POLL)) {
2000 		/* Wait for at last 400ns for status bit to be valid */
2001 		DELAY(1);
2002 		mvsata_atapi_intr(chp, xfer, 0);
2003 	} else
2004 		chp->ch_flags |= ATACH_IRQ_WAIT;
2005 	if (sc_xfer->xs_control & XS_CTL_POLL) {
2006 		if (chp->ch_flags & ATACH_DMA_WAIT) {
2007 			wdc_dmawait(chp, xfer, sc_xfer->timeout);
2008 			chp->ch_flags &= ~ATACH_DMA_WAIT;
2009 		}
2010 		while ((sc_xfer->xs_status & XS_STS_DONE) == 0) {
2011 			/* Wait for at last 400ns for status bit to be valid */
2012 			DELAY(1);
2013 			mvsata_atapi_intr(chp, xfer, 0);
2014 		}
2015 	}
2016 	return;
2017 
2018 timeout:
2019 	aprint_error_dev(atac->atac_dev, "channel %d drive %d: %s timed out\n",
2020 	    chp->ch_channel, xfer->c_drive, errstring);
2021 	sc_xfer->error = XS_TIMEOUT;
2022 	MVSATA_WDC_WRITE_1(mvport, SRB_CAS, WDCTL_4BIT);
2023 	delay(10);		/* some drives need a little delay here */
2024 	mvsata_atapi_reset(chp, xfer);
2025 	return;
2026 
2027 error:
2028 	aprint_error_dev(atac->atac_dev,
2029 	    "channel %d drive %d: %s error (0x%x)\n",
2030 	    chp->ch_channel, xfer->c_drive, errstring, chp->ch_error);
2031 	sc_xfer->error = XS_SHORTSENSE;
2032 	sc_xfer->sense.atapi_sense = chp->ch_error;
2033 	MVSATA_WDC_WRITE_1(mvport, SRB_CAS, WDCTL_4BIT);
2034 	delay(10);		/* some drives need a little delay here */
2035 	mvsata_atapi_reset(chp, xfer);
2036 	return;
2037 }
2038 
/*
 * mvsata_atapi_intr:
 *	Interrupt/poll service routine for an in-progress ATAPI transfer.
 *	Reads the current ATAPI phase from the shadow registers and runs
 *	the matching step (command packet out, data in/out, completion).
 *	Returns 0 when the interrupt was not for us, 1 otherwise.
 *	'irq' is nonzero when called from interrupt context, 0 when polled.
 */
static int
mvsata_atapi_intr(struct ata_channel *chp, struct ata_xfer *xfer, int irq)
{
	struct mvsata_port *mvport = (struct mvsata_port *)chp;
	struct atac_softc *atac = chp->ch_atac;
	struct wdc_softc *wdc = CHAN_TO_WDC(chp);
	struct scsipi_xfer *sc_xfer = xfer->c_cmd;
	struct ata_drive_datas *drvp = &chp->ch_drive[xfer->c_drive];
	int len, phase, ire, error, retries=0, i;
	void *cmd;

	DPRINTFN(1, ("%s:%d:%d: mvsata_atapi_intr\n",
	    device_xname(atac->atac_dev), chp->ch_channel, xfer->c_drive));

	/* Is it not a transfer, but a control operation? */
	if (drvp->state < READY) {
		aprint_error_dev(atac->atac_dev,
		    "channel %d drive %d: bad state %d\n",
		    chp->ch_channel, xfer->c_drive, drvp->state);
		panic("mvsata_atapi_intr: bad state");
	}
	/*
	 * If we missed an interrupt in a PIO transfer, reset and restart.
	 * Don't try to continue transfer, we may have missed cycles.
	 */
	if ((xfer->c_flags & (C_TIMEOU | C_DMA)) == C_TIMEOU) {
		sc_xfer->error = XS_TIMEOUT;
		mvsata_atapi_reset(chp, xfer);
		return 1;
	}

	/* Ack interrupt done in wdc_wait_for_unbusy */
	MVSATA_WDC_WRITE_1(mvport, SRB_H, WDSD_IBM);
	/* When polled (irq == 0), allow the full command timeout. */
	if (wdc_wait_for_unbusy(chp,
	    (irq == 0) ? sc_xfer->timeout : 0, AT_POLL) == WDCWAIT_TOUT) {
		if (irq && (xfer->c_flags & C_TIMEOU) == 0)
			return 0; /* IRQ was not for us */
		aprint_error_dev(atac->atac_dev,
		    "channel %d: device timeout, c_bcount=%d, c_skip=%d\n",
		    chp->ch_channel, xfer->c_bcount, xfer->c_skip);
		if (xfer->c_flags & C_DMA)
			ata_dmaerr(drvp,
			    (xfer->c_flags & C_POLL) ? AT_POLL : 0);
		sc_xfer->error = XS_TIMEOUT;
		mvsata_atapi_reset(chp, xfer);
		return 1;
	}

	/*
	 * If we missed an IRQ and were using DMA, flag it as a DMA error
	 * and reset device.
	 */
	if ((xfer->c_flags & C_TIMEOU) && (xfer->c_flags & C_DMA)) {
		ata_dmaerr(drvp, (xfer->c_flags & C_POLL) ? AT_POLL : 0);
		sc_xfer->error = XS_RESET;
		mvsata_atapi_reset(chp, xfer);
		return (1);
	}
	/*
	 * if the request sense command was aborted, report the short sense
	 * previously recorded, else continue normal processing
	 */

	/*
	 * Determine the current phase: transfer length from the LBA mid/high
	 * (byte-count) registers, CMD/IN bits from the interrupt-reason
	 * (sector count) register, DRQ from the cached status.
	 */
again:
	len = MVSATA_WDC_READ_1(mvport, SRB_LBAM) +
	    256 * MVSATA_WDC_READ_1(mvport, SRB_LBAH);
	ire = MVSATA_WDC_READ_1(mvport, SRB_SC);
	phase = (ire & (WDCI_CMD | WDCI_IN)) | (chp->ch_status & WDCS_DRQ);
	DPRINTF((
	    "mvsata_atapi_intr: c_bcount %d len %d st 0x%x err 0x%x ire 0x%x :",
	    xfer->c_bcount, len, chp->ch_status, chp->ch_error, ire));

	switch (phase) {
	case PHASE_CMDOUT:
		cmd = sc_xfer->cmd;
		DPRINTF(("PHASE_CMDOUT\n"));
		/* Init the DMA channel if necessary */
		if (xfer->c_flags & C_DMA) {
			error = mvsata_bdma_init(mvport, sc_xfer,
			    (char *)xfer->c_databuf + xfer->c_skip);
			if (error) {
				if (error == EINVAL) {
					/*
					 * We can't do DMA on this transfer
					 * for some reason.  Fall back to PIO.
					 */
					xfer->c_flags &= ~C_DMA;
					error = 0;
				} else {
					sc_xfer->error = XS_DRIVER_STUFFUP;
					break;
				}
			}
		}

		/* send packet command */
		/* Commands are 12 or 16 bytes long. It's 32-bit aligned */
		wdc->dataout_pio(chp, drvp->drive_flags, cmd, sc_xfer->cmdlen);

		/* Start the DMA channel if necessary */
		if (xfer->c_flags & C_DMA) {
			mvsata_bdma_start(mvport);
			chp->ch_flags |= ATACH_DMA_WAIT;
		}

		if ((sc_xfer->xs_control & XS_CTL_POLL) == 0)
			chp->ch_flags |= ATACH_IRQ_WAIT;
		return 1;

	case PHASE_DATAOUT:
		/* write data */
		DPRINTF(("PHASE_DATAOUT\n"));
		/* A DATAOUT phase is only legal for a PIO write request. */
		if ((sc_xfer->xs_control & XS_CTL_DATA_OUT) == 0 ||
		    (xfer->c_flags & C_DMA) != 0) {
			aprint_error_dev(atac->atac_dev,
			    "channel %d drive %d: bad data phase DATAOUT\n",
			    chp->ch_channel, xfer->c_drive);
			if (xfer->c_flags & C_DMA)
				ata_dmaerr(drvp,
				    (xfer->c_flags & C_POLL) ? AT_POLL : 0);
			sc_xfer->error = XS_TIMEOUT;
			mvsata_atapi_reset(chp, xfer);
			return 1;
		}
		/* c_lenoff: bytes the device wants beyond what we have left */
		xfer->c_lenoff = len - xfer->c_bcount;
		if (xfer->c_bcount < len) {
			aprint_error_dev(atac->atac_dev, "channel %d drive %d:"
			    " warning: write only %d of %d requested bytes\n",
			    chp->ch_channel, xfer->c_drive, xfer->c_bcount,
			    len);
			len = xfer->c_bcount;
		}

		wdc->dataout_pio(chp, drvp->drive_flags,
		    (char *)xfer->c_databuf + xfer->c_skip, len);

		/* Pad the remainder of the device's request with zeros. */
		for (i = xfer->c_lenoff; i > 0; i -= 2)
			MVSATA_WDC_WRITE_2(mvport, SRB_PIOD, 0);

		xfer->c_skip += len;
		xfer->c_bcount -= len;
		if ((sc_xfer->xs_control & XS_CTL_POLL) == 0)
			chp->ch_flags |= ATACH_IRQ_WAIT;
		return 1;

	case PHASE_DATAIN:
		/* Read data */
		DPRINTF(("PHASE_DATAIN\n"));
		/* A DATAIN phase is only legal for a PIO read request. */
		if ((sc_xfer->xs_control & XS_CTL_DATA_IN) == 0 ||
		    (xfer->c_flags & C_DMA) != 0) {
			aprint_error_dev(atac->atac_dev,
			    "channel %d drive %d: bad data phase DATAIN\n",
			    chp->ch_channel, xfer->c_drive);
			if (xfer->c_flags & C_DMA)
				ata_dmaerr(drvp,
				    (xfer->c_flags & C_POLL) ? AT_POLL : 0);
			sc_xfer->error = XS_TIMEOUT;
			mvsata_atapi_reset(chp, xfer);
			return 1;
		}
		xfer->c_lenoff = len - xfer->c_bcount;
		if (xfer->c_bcount < len) {
			aprint_error_dev(atac->atac_dev, "channel %d drive %d:"
			    " warning: reading only %d of %d bytes\n",
			    chp->ch_channel, xfer->c_drive, xfer->c_bcount,
			    len);
			len = xfer->c_bcount;
		}

		wdc->datain_pio(chp, drvp->drive_flags,
		    (char *)xfer->c_databuf + xfer->c_skip, len);

		/* Drain any excess bytes the device is offering. */
		if (xfer->c_lenoff > 0)
			wdcbit_bucket(chp, len - xfer->c_bcount);

		xfer->c_skip += len;
		xfer->c_bcount -= len;
		if ((sc_xfer->xs_control & XS_CTL_POLL) == 0)
			chp->ch_flags |= ATACH_IRQ_WAIT;
		return 1;

	case PHASE_ABORTED:
	case PHASE_COMPLETED:
		DPRINTF(("PHASE_COMPLETED\n"));
		/* For DMA, the engine moved the whole request at once. */
		if (xfer->c_flags & C_DMA)
			xfer->c_bcount -= sc_xfer->datalen;
		sc_xfer->resid = xfer->c_bcount;
		mvsata_atapi_phase_complete(xfer);
		return 1;

	default:
		/*
		 * Unrecognized phase: the status may simply not be settled
		 * yet, so re-read status/error and retry for up to
		 * 500 * 100us before declaring it an error.
		 */
		if (++retries<500) {
			DELAY(100);
			chp->ch_status = MVSATA_WDC_READ_1(mvport, SRB_CS);
			chp->ch_error = MVSATA_WDC_READ_1(mvport, SRB_FE);
			goto again;
		}
		aprint_error_dev(atac->atac_dev,
		    "channel %d drive %d: unknown phase 0x%x\n",
		    chp->ch_channel, xfer->c_drive, phase);
		if (chp->ch_status & WDCS_ERR) {
			sc_xfer->error = XS_SHORTSENSE;
			sc_xfer->sense.atapi_sense = chp->ch_error;
		} else {
			if (xfer->c_flags & C_DMA)
				ata_dmaerr(drvp,
				    (xfer->c_flags & C_POLL) ? AT_POLL : 0);
			sc_xfer->error = XS_RESET;
			mvsata_atapi_reset(chp, xfer);
			return (1);
		}
	}
	/* Only reached from the PHASE_CMDOUT break or the default case. */
	DPRINTF(("mvsata_atapi_intr: mvsata_atapi_done() (end), error 0x%x "
	    "sense 0x%x\n", sc_xfer->error, sc_xfer->sense.atapi_sense));
	mvsata_atapi_done(chp, xfer);
	return 1;
}
2256 
2257 static void
2258 mvsata_atapi_kill_xfer(struct ata_channel *chp, struct ata_xfer *xfer,
2259 		       int reason)
2260 {
2261 	struct mvsata_port *mvport = (struct mvsata_port *)chp;
2262 	struct scsipi_xfer *sc_xfer = xfer->c_cmd;
2263 
2264 	/* remove this command from xfer queue */
2265 	switch (reason) {
2266 	case KILL_GONE:
2267 		sc_xfer->error = XS_DRIVER_STUFFUP;
2268 		break;
2269 
2270 	case KILL_RESET:
2271 		sc_xfer->error = XS_RESET;
2272 		break;
2273 
2274 	default:
2275 		aprint_error_dev(MVSATA_DEV2(mvport),
2276 		    "mvsata_atapi_kill_xfer: unknown reason %d\n", reason);
2277 		panic("mvsata_atapi_kill_xfer");
2278 	}
2279 	ata_free_xfer(chp, xfer);
2280 	scsipi_done(sc_xfer);
2281 }
2282 
2283 static void
2284 mvsata_atapi_reset(struct ata_channel *chp, struct ata_xfer *xfer)
2285 {
2286 	struct atac_softc *atac = chp->ch_atac;
2287 	struct ata_drive_datas *drvp = &chp->ch_drive[xfer->c_drive];
2288 	struct scsipi_xfer *sc_xfer = xfer->c_cmd;
2289 
2290 	mvsata_pmp_select(mvport, xfer->c_drive);
2291 
2292 	wdccommandshort(chp, 0, ATAPI_SOFT_RESET);
2293 	drvp->state = 0;
2294 	if (wdc_wait_for_unbusy(chp, WDC_RESET_WAIT, AT_POLL) != 0) {
2295 		printf("%s:%d:%d: reset failed\n", device_xname(atac->atac_dev),
2296 		    chp->ch_channel, xfer->c_drive);
2297 		sc_xfer->error = XS_SELTIMEOUT;
2298 	}
2299 	mvsata_atapi_done(chp, xfer);
2300 	return;
2301 }
2302 
/*
 * mvsata_atapi_phase_complete:
 *	Finish the ATAPI completion phase: optionally wait for DSC
 *	(rescheduling itself via callout if the bit is slow to assert),
 *	then interpret the final status/error and complete the request.
 */
static void
mvsata_atapi_phase_complete(struct ata_xfer *xfer)
{
	struct ata_channel *chp = xfer->c_chp;
	struct atac_softc *atac = chp->ch_atac;
	struct wdc_softc *wdc = CHAN_TO_WDC(chp);
	struct scsipi_xfer *sc_xfer = xfer->c_cmd;
	struct ata_drive_datas *drvp = &chp->ch_drive[xfer->c_drive];

	/* wait for DSC if needed */
	if (drvp->drive_flags & ATA_DRIVE_ATAPIDSCW) {
		DPRINTFN(1,
		    ("%s:%d:%d: mvsata_atapi_phase_complete: polldsc %d\n",
		    device_xname(atac->atac_dev), chp->ch_channel,
		    xfer->c_drive, xfer->c_dscpoll));
		/* callout-based repolling can't work before interrupts run */
		if (cold)
			panic("mvsata_atapi_phase_complete: cold");

		if (wdcwait(chp, WDCS_DSC, WDCS_DSC, 10, AT_POLL) ==
		    WDCWAIT_TOUT) {
			/* 10ms not enough, try again in 1 tick */
			if (xfer->c_dscpoll++ > mstohz(sc_xfer->timeout)) {
				aprint_error_dev(atac->atac_dev,
				    "channel %d: wait_for_dsc failed\n",
				    chp->ch_channel);
				sc_xfer->error = XS_TIMEOUT;
				mvsata_atapi_reset(chp, xfer);
				return;
			} else
				callout_reset(&chp->ch_callout, 1,
				    mvsata_atapi_polldsc, xfer);
			return;
		}
	}

	/*
	 * Some drive occasionally set WDCS_ERR with
	 * "ATA illegal length indication" in the error
	 * register. If we read some data the sense is valid
	 * anyway, so don't report the error.
	 */
	if (chp->ch_status & WDCS_ERR &&
	    ((sc_xfer->xs_control & XS_CTL_REQSENSE) == 0 ||
	    sc_xfer->resid == sc_xfer->datalen)) {
		/* save the short sense */
		sc_xfer->error = XS_SHORTSENSE;
		sc_xfer->sense.atapi_sense = chp->ch_error;
		if ((sc_xfer->xs_periph->periph_quirks & PQUIRK_NOSENSE) == 0) {
			/* ask scsipi to send a REQUEST_SENSE */
			sc_xfer->error = XS_BUSY;
			sc_xfer->status = SCSI_CHECK;
		} else
		/*
		 * NOTE(review): this DMA-error check runs only on the
		 * PQUIRK_NOSENSE path (it is the "else" of the quirk
		 * test) — confirm that is intended and not meant to be
		 * an independent check.
		 */
		    if (wdc->dma_status & (WDC_DMAST_NOIRQ | WDC_DMAST_ERR)) {
			ata_dmaerr(drvp,
			    (xfer->c_flags & C_POLL) ? AT_POLL : 0);
			sc_xfer->error = XS_RESET;
			mvsata_atapi_reset(chp, xfer);
			return;
		}
	}
	/* A nonzero residue here means the device moved fewer bytes. */
	if (xfer->c_bcount != 0)
		DPRINTFN(1, ("%s:%d:%d: mvsata_atapi_intr:"
		    " bcount value is %d after io\n",
		    device_xname(atac->atac_dev), chp->ch_channel,
		    xfer->c_drive, xfer->c_bcount));
#ifdef DIAGNOSTIC
	if (xfer->c_bcount < 0)
		aprint_error_dev(atac->atac_dev,
		    "channel %d drive %d: mvsata_atapi_intr:"
		    " warning: bcount value is %d after io\n",
		    chp->ch_channel, xfer->c_drive, xfer->c_bcount);
#endif

	DPRINTFN(1, ("%s:%d:%d: mvsata_atapi_phase_complete:"
	    " mvsata_atapi_done(), error 0x%x sense 0x%x\n",
	    device_xname(atac->atac_dev), chp->ch_channel, xfer->c_drive,
	    sc_xfer->error, sc_xfer->sense.atapi_sense));
	mvsata_atapi_done(chp, xfer);
}
2382 
2383 static void
2384 mvsata_atapi_done(struct ata_channel *chp, struct ata_xfer *xfer)
2385 {
2386 	struct atac_softc *atac = chp->ch_atac;
2387 	struct scsipi_xfer *sc_xfer = xfer->c_cmd;
2388 	int drive = xfer->c_drive;
2389 
2390 	DPRINTFN(1, ("%s:%d:%d: mvsata_atapi_done: flags 0x%x\n",
2391 	    device_xname(atac->atac_dev), chp->ch_channel, xfer->c_drive,
2392 	    (u_int)xfer->c_flags));
2393 	callout_stop(&chp->ch_callout);
2394 	/* mark controller inactive and free the command */
2395 	chp->ch_queue->active_xfer = NULL;
2396 	ata_free_xfer(chp, xfer);
2397 
2398 	if (chp->ch_drive[drive].drive_flags & ATA_DRIVE_WAITDRAIN) {
2399 		sc_xfer->error = XS_DRIVER_STUFFUP;
2400 		chp->ch_drive[drive].drive_flags &= ~ATA_DRIVE_WAITDRAIN;
2401 		wakeup(&chp->ch_queue->active_xfer);
2402 	}
2403 
2404 	DPRINTFN(1, ("%s:%d: mvsata_atapi_done: scsipi_done\n",
2405 	    device_xname(atac->atac_dev), chp->ch_channel));
2406 	scsipi_done(sc_xfer);
2407 	DPRINTFN(1, ("%s:%d: atastart from wdc_atapi_done, flags 0x%x\n",
2408 	    device_xname(atac->atac_dev), chp->ch_channel, chp->ch_flags));
2409 	atastart(chp);
2410 }
2411 
/*
 * Callout trampoline: re-enter the completion path to poll DSC again.
 * 'arg' is the struct ata_xfer scheduled by mvsata_atapi_phase_complete().
 */
static void
mvsata_atapi_polldsc(void *arg)
{
	struct ata_xfer *xfer = arg;

	mvsata_atapi_phase_complete(xfer);
}
2418 #endif	/* NATAPIBUS > 0 */
2419 
2420 
2421 /*
2422  * XXXX: Shall we need lock for race condition in mvsata_edma_enqueue{,_gen2}(),
2423  * if supported queuing command by atabus?  The race condition will not happen
2424  * if this is called only to the thread of atabus.
2425  */
/*
 * mvsata_edma_enqueue:
 *	Queue one block I/O request on the port's EDMA request queue:
 *	allocate a queue tag, DMA-load the data buffer, build the ePRD
 *	scatter/gather table and the CRQB, then advance the request
 *	queue in-pointer register to hand it to the hardware.
 *	Returns 0 on success, EBUSY when the queue or tag pool is full,
 *	or the error from mvsata_dma_bufload().
 */
static int
mvsata_edma_enqueue(struct mvsata_port *mvport, struct ata_bio *ata_bio,
		    void *databuf)
{
	struct mvsata_softc *sc = device_private(MVSATA_DEV2(mvport));
	struct ata_channel *chp = &mvport->port_ata_channel;
	struct eprd *eprd;
	bus_addr_t crqb_base_addr;
	bus_dmamap_t data_dmamap;
	uint32_t reg;
	int quetag, erqqip, erqqop, next, rv, i;

	DPRINTFN(2, ("%s:%d:%d: mvsata_edma_enqueue:"
	    " blkno=0x%" PRIx64 ", nbytes=%d, flags=0x%x\n",
	    device_xname(MVSATA_DEV2(mvport)), mvport->port_hc->hc,
	    mvport->port, ata_bio->blkno, ata_bio->nbytes, ata_bio->flags));

	/* Read the hardware in/out pointers of the request queue. */
	reg = MVSATA_EDMA_READ_4(mvport, EDMA_REQQOP);
	erqqop = (reg & EDMA_REQQP_ERQQP_MASK) >> EDMA_REQQP_ERQQP_SHIFT;
	reg = MVSATA_EDMA_READ_4(mvport, EDMA_REQQIP);
	erqqip = (reg & EDMA_REQQP_ERQQP_MASK) >> EDMA_REQQP_ERQQP_SHIFT;
	next = erqqip;
	MVSATA_EDMAQ_INC(next);
	if (next == erqqop)
		/* queue full */
		return EBUSY;
	if ((quetag = mvsata_quetag_get(mvport)) == -1)
		/* tag nothing */
		return EBUSY;
	DPRINTFN(2, ("    erqqip=%d, quetag=%d\n", erqqip, quetag));

	rv = mvsata_dma_bufload(mvport, quetag, databuf, ata_bio->nbytes,
	    ata_bio->flags);
	if (rv != 0)
		return rv;

	KASSERT(mvport->port_reqtbl[quetag].xfer == NULL);
	KASSERT(chp->ch_queue->active_xfer != NULL);
	mvport->port_reqtbl[quetag].xfer = chp->ch_queue->active_xfer;

	/* setup EDMA Physical Region Descriptors (ePRD) Table Data */
	data_dmamap = mvport->port_reqtbl[quetag].data_dmamap;
	eprd = mvport->port_reqtbl[quetag].eprd;
	for (i = 0; i < data_dmamap->dm_nsegs; i++) {
		bus_addr_t ds_addr = data_dmamap->dm_segs[i].ds_addr;
		bus_size_t ds_len = data_dmamap->dm_segs[i].ds_len;

		eprd->prdbal = htole32(ds_addr & EPRD_PRDBAL_MASK);
		eprd->bytecount = htole32(EPRD_BYTECOUNT(ds_len));
		eprd->eot = htole16(0);
		/* double shift avoids UB on 32-bit bus_addr_t */
		eprd->prdbah = htole32((ds_addr >> 16) >> 16);
		eprd++;
	}
	/* mark the last descriptor as end-of-table */
	(eprd - 1)->eot |= htole16(EPRD_EOT);
#ifdef MVSATA_DEBUG
	if (mvsata_debug >= 3)
		mvsata_print_eprd(mvport, quetag);
#endif
	bus_dmamap_sync(mvport->port_dmat, mvport->port_eprd_dmamap,
	    mvport->port_reqtbl[quetag].eprd_offset, MVSATA_EPRD_MAX_SIZE,
	    BUS_DMASYNC_PREWRITE);

	/* setup EDMA Command Request Block (CRQB) Data */
	sc->sc_edma_setup_crqb(mvport, erqqip, quetag, ata_bio);
#ifdef MVSATA_DEBUG
	if (mvsata_debug >= 3)
		mvsata_print_crqb(mvport, erqqip);
#endif
	bus_dmamap_sync(mvport->port_dmat, mvport->port_crqb_dmamap,
	    erqqip * sizeof(union mvsata_crqb),
	    sizeof(union mvsata_crqb), BUS_DMASYNC_PREWRITE);

	MVSATA_EDMAQ_INC(erqqip);

	/* Publish the new in-pointer; this starts the request. */
	crqb_base_addr = mvport->port_crqb_dmamap->dm_segs[0].ds_addr &
	    (EDMA_REQQP_ERQQBAP_MASK | EDMA_REQQP_ERQQBA_MASK);
	MVSATA_EDMA_WRITE_4(mvport, EDMA_REQQBAH, (crqb_base_addr >> 16) >> 16);
	MVSATA_EDMA_WRITE_4(mvport, EDMA_REQQIP,
	    crqb_base_addr | (erqqip << EDMA_REQQP_ERQQP_SHIFT));

	return 0;
}
2508 
/*
 * mvsata_edma_handle:
 *	Drain the EDMA response queue, completing each finished request.
 *	If xfer1 is non-NULL, stop as soon as that particular transfer is
 *	seen; otherwise handle everything pending.  Returns the number of
 *	responses handled (0 if none, 1 if xfer1 was found).
 */
static int
mvsata_edma_handle(struct mvsata_port *mvport, struct ata_xfer *xfer1)
{
	struct ata_channel *chp = &mvport->port_ata_channel;
	struct crpb *crpb;
	struct ata_bio *ata_bio;
	struct ata_xfer *xfer;
	uint32_t reg;
	int erqqip, erqqop, erpqip, erpqop, prev_erpqop, quetag, handled = 0, n;

	/* First, Sync for Request Queue buffer */
	reg = MVSATA_EDMA_READ_4(mvport, EDMA_REQQOP);
	erqqop = (reg & EDMA_REQQP_ERQQP_MASK) >> EDMA_REQQP_ERQQP_SHIFT;
	if (mvport->port_prev_erqqop != erqqop) {
		const int s = sizeof(union mvsata_crqb);

		/*
		 * POSTWRITE-sync the CRQB slots the hardware consumed since
		 * the last call; the range may wrap around the ring, in
		 * which case it is synced in two pieces.
		 */
		if (mvport->port_prev_erqqop < erqqop)
			n = erqqop - mvport->port_prev_erqqop;
		else {
			if (erqqop > 0)
				bus_dmamap_sync(mvport->port_dmat,
				    mvport->port_crqb_dmamap, 0, erqqop * s,
				    BUS_DMASYNC_POSTWRITE);
			n = MVSATA_EDMAQ_LEN - mvport->port_prev_erqqop;
		}
		if (n > 0)
			bus_dmamap_sync(mvport->port_dmat,
			    mvport->port_crqb_dmamap,
			    mvport->port_prev_erqqop * s, n * s,
			    BUS_DMASYNC_POSTWRITE);
		mvport->port_prev_erqqop = erqqop;
	}

	/* Read the response queue in/out pointers. */
	reg = MVSATA_EDMA_READ_4(mvport, EDMA_RESQIP);
	erpqip = (reg & EDMA_RESQP_ERPQP_MASK) >> EDMA_RESQP_ERPQP_SHIFT;
	reg = MVSATA_EDMA_READ_4(mvport, EDMA_RESQOP);
	erpqop = (reg & EDMA_RESQP_ERPQP_MASK) >> EDMA_RESQP_ERPQP_SHIFT;

	DPRINTFN(3, ("%s:%d:%d: mvsata_edma_handle: erpqip=%d, erpqop=%d\n",
	    device_xname(MVSATA_DEV2(mvport)), mvport->port_hc->hc,
	    mvport->port, erpqip, erpqop));

	/* nothing pending in the response queue */
	if (erpqop == erpqip)
		return 0;

	/* POSTREAD-sync the CRPB slots we are about to read (may wrap). */
	if (erpqop < erpqip)
		n = erpqip - erpqop;
	else {
		if (erpqip > 0)
			bus_dmamap_sync(mvport->port_dmat,
			    mvport->port_crpb_dmamap,
			    0, erpqip * sizeof(struct crpb),
			    BUS_DMASYNC_POSTREAD);
		n = MVSATA_EDMAQ_LEN - erpqop;
	}
	if (n > 0)
		bus_dmamap_sync(mvport->port_dmat, mvport->port_crpb_dmamap,
		    erpqop * sizeof(struct crpb),
		    n * sizeof(struct crpb), BUS_DMASYNC_POSTREAD);

	prev_erpqop = erpqop;
	while (erpqop != erpqip) {
#ifdef MVSATA_DEBUG
		if (mvsata_debug >= 3)
			mvsata_print_crpb(mvport, erpqop);
#endif
		/* Map the response back to the queue tag and its xfer. */
		crpb = mvport->port_crpb + erpqop;
		quetag = CRPB_CHOSTQUETAG(le16toh(crpb->id));
		KASSERT(chp->ch_queue->active_xfer != NULL);
		xfer = chp->ch_queue->active_xfer;
		KASSERT(xfer == mvport->port_reqtbl[quetag].xfer);
#ifdef DIAGNOSTIC
		if (xfer == NULL)
			panic("unknown response received: %s:%d:%d: tag 0x%x\n",
			    device_xname(MVSATA_DEV2(mvport)),
			    mvport->port_hc->hc, mvport->port, quetag);
#endif

		bus_dmamap_sync(mvport->port_dmat, mvport->port_eprd_dmamap,
		    mvport->port_reqtbl[quetag].eprd_offset,
		    MVSATA_EPRD_MAX_SIZE, BUS_DMASYNC_POSTWRITE);

		/* Extract device status / EDMA error from the response. */
		chp->ch_status = CRPB_CDEVSTS(le16toh(crpb->rspflg));
		chp->ch_error = CRPB_CEDMASTS(le16toh(crpb->rspflg));
		ata_bio = xfer->c_cmd;
		ata_bio->error = NOERROR;
		ata_bio->r_error = 0;
		if (chp->ch_status & WDCS_ERR)
			ata_bio->error = ERROR;
		if (chp->ch_status & WDCS_BSY)
			ata_bio->error = TIMEOUT;
		if (chp->ch_error)
			ata_bio->error = ERR_DMA;

		/* Release DMA resources and return the tag to the pool. */
		mvsata_dma_bufunload(mvport, quetag, ata_bio->flags);
		mvport->port_reqtbl[quetag].xfer = NULL;
		mvsata_quetag_put(mvport, quetag);
		MVSATA_EDMAQ_INC(erpqop);

#if 1	/* XXXX: flags clears here, because necessary the atabus layer. */
		erqqip = (MVSATA_EDMA_READ_4(mvport, EDMA_REQQIP) &
		    EDMA_REQQP_ERQQP_MASK) >> EDMA_REQQP_ERQQP_SHIFT;
		if (erpqop == erqqip)
			chp->ch_flags &= ~(ATACH_DMA_WAIT | ATACH_IRQ_WAIT);
#endif
		mvsata_bio_intr(chp, xfer, 1);
		if (xfer1 == NULL)
			handled++;
		else if (xfer == xfer1) {
			handled = 1;
			break;
		}
	}
	/* PREREAD-sync the consumed CRPB slots for reuse (may wrap). */
	if (prev_erpqop < erpqop)
		n = erpqop - prev_erpqop;
	else {
		if (erpqop > 0)
			bus_dmamap_sync(mvport->port_dmat,
			    mvport->port_crpb_dmamap, 0,
			    erpqop * sizeof(struct crpb), BUS_DMASYNC_PREREAD);
		n = MVSATA_EDMAQ_LEN - prev_erpqop;
	}
	if (n > 0)
		bus_dmamap_sync(mvport->port_dmat, mvport->port_crpb_dmamap,
		    prev_erpqop * sizeof(struct crpb),
		    n * sizeof(struct crpb), BUS_DMASYNC_PREREAD);

	/* Write back the new response queue out-pointer. */
	reg &= ~EDMA_RESQP_ERPQP_MASK;
	reg |= (erpqop << EDMA_RESQP_ERPQP_SHIFT);
	MVSATA_EDMA_WRITE_4(mvport, EDMA_RESQOP, reg);

#if 0	/* already cleared ago? */
	erqqip = (MVSATA_EDMA_READ_4(mvport, EDMA_REQQIP) &
	    EDMA_REQQP_ERQQP_MASK) >> EDMA_REQQP_ERQQP_SHIFT;
	if (erpqop == erqqip)
		chp->ch_flags &= ~(ATACH_DMA_WAIT | ATACH_IRQ_WAIT);
#endif

	return handled;
}
2649 
2650 static int
2651 mvsata_edma_wait(struct mvsata_port *mvport, struct ata_xfer *xfer, int timeout)
2652 {
2653 	struct ata_bio *ata_bio = xfer->c_cmd;
2654 	int xtime;
2655 
2656 	for (xtime = 0;  xtime < timeout / 10; xtime++) {
2657 		if (mvsata_edma_handle(mvport, xfer))
2658 			return 0;
2659 		if (ata_bio->flags & ATA_POLL)
2660 			delay(10000);
2661 		else
2662 			tsleep(&xfer, PRIBIO, "mvsataipl", mstohz(10));
2663 	}
2664 
2665 	DPRINTF(("mvsata_edma_wait: timeout: %p\n", xfer));
2666 	mvsata_edma_rqq_remove(mvport, xfer);
2667 	xfer->c_flags |= C_TIMEOU;
2668 	return 1;
2669 }
2670 
2671 static void
2672 mvsata_edma_timeout(void *arg)
2673 {
2674 	struct ata_xfer *xfer = (struct ata_xfer *)arg;
2675 	struct ata_channel *chp = xfer->c_chp;
2676 	struct mvsata_port *mvport = (struct mvsata_port *)chp;
2677 	int s;
2678 
2679 	s = splbio();
2680 	DPRINTF(("mvsata_edma_timeout: %p\n", xfer));
2681 	if ((chp->ch_flags & ATACH_IRQ_WAIT) != 0) {
2682 		mvsata_edma_rqq_remove(mvport, xfer);
2683 		xfer->c_flags |= C_TIMEOU;
2684 		mvsata_bio_intr(chp, xfer, 1);
2685 	}
2686 	splx(s);
2687 }
2688 
/*
 * mvsata_edma_rqq_remove:
 *	Remove one transfer from the EDMA request queue.  The hardware
 *	offers no way to cancel a single CRQB, so the port is hard-reset,
 *	completed responses are drained, the surviving requests are
 *	rebuilt into a fresh request queue, and EDMA is restarted.
 */
static void
mvsata_edma_rqq_remove(struct mvsata_port *mvport, struct ata_xfer *xfer)
{
	struct mvsata_softc *sc = device_private(MVSATA_DEV2(mvport));
	struct ata_bio *ata_bio;
	bus_addr_t crqb_base_addr;
	int erqqip, i;

	/* First, hardware reset, stop EDMA */
	mvsata_hreset_port(mvport);

	/* cleanup completed EDMA safely */
	mvsata_edma_handle(mvport, NULL);

	bus_dmamap_sync(mvport->port_dmat, mvport->port_crqb_dmamap, 0,
	    sizeof(union mvsata_crqb) * MVSATA_EDMAQ_LEN, BUS_DMASYNC_PREWRITE);
	/* Rebuild the request queue from the surviving outstanding tags. */
	for (i = 0, erqqip = 0; i < MVSATA_EDMAQ_LEN; i++) {
		if (mvport->port_reqtbl[i].xfer == NULL)
			continue;

		ata_bio = mvport->port_reqtbl[i].xfer->c_cmd;
		if (mvport->port_reqtbl[i].xfer == xfer) {
			/* remove xfer from EDMA request queue */
			bus_dmamap_sync(mvport->port_dmat,
			    mvport->port_eprd_dmamap,
			    mvport->port_reqtbl[i].eprd_offset,
			    MVSATA_EPRD_MAX_SIZE, BUS_DMASYNC_POSTWRITE);
			mvsata_dma_bufunload(mvport, i, ata_bio->flags);
			mvport->port_reqtbl[i].xfer = NULL;
			mvsata_quetag_put(mvport, i);
			continue;
		}

		/* Re-emit a CRQB for a request we intend to keep. */
		sc->sc_edma_setup_crqb(mvport, erqqip, i, ata_bio);
		erqqip++;
	}
	bus_dmamap_sync(mvport->port_dmat, mvport->port_crqb_dmamap, 0,
	    sizeof(union mvsata_crqb) * MVSATA_EDMAQ_LEN,
	    BUS_DMASYNC_POSTWRITE);

	/* Reconfigure and restart EDMA with the rebuilt queue. */
	mvsata_edma_config(mvport, mvport->port_edmamode);
	mvsata_edma_reset_qptr(mvport);
	mvsata_edma_enable(mvport);

	crqb_base_addr = mvport->port_crqb_dmamap->dm_segs[0].ds_addr &
	    (EDMA_REQQP_ERQQBAP_MASK | EDMA_REQQP_ERQQBA_MASK);
	MVSATA_EDMA_WRITE_4(mvport, EDMA_REQQBAH, (crqb_base_addr >> 16) >> 16);
	MVSATA_EDMA_WRITE_4(mvport, EDMA_REQQIP,
	    crqb_base_addr | (erqqip << EDMA_REQQP_ERQQP_SHIFT));
}
2739 
2740 #if NATAPIBUS > 0
2741 static int
2742 mvsata_bdma_init(struct mvsata_port *mvport, struct scsipi_xfer *sc_xfer,
2743 		  void *databuf)
2744 {
2745 	struct mvsata_softc *sc = device_private(MVSATA_DEV2(mvport));
2746 	struct eprd *eprd;
2747 	bus_dmamap_t data_dmamap;
2748 	bus_addr_t eprd_addr;
2749 	int quetag, rv;
2750 
2751 	DPRINTFN(2,
2752 	    ("%s:%d:%d: mvsata_bdma_init: datalen=%d, xs_control=0x%x\n",
2753 	    device_xname(MVSATA_DEV2(mvport)), mvport->port_hc->hc,
2754 	    mvport->port, sc_xfer->datalen, sc_xfer->xs_control));
2755 
2756 	if ((quetag = mvsata_quetag_get(mvport)) == -1)
2757 		/* tag nothing */
2758 		return EBUSY;
2759 	DPRINTFN(2, ("    quetag=%d\n", quetag));
2760 
2761 	rv = mvsata_dma_bufload(mvport, quetag, databuf, sc_xfer->datalen,
2762 	    sc_xfer->xs_control & XS_CTL_DATA_IN ? ATA_READ : 0);
2763 	if (rv != 0)
2764 		return rv;
2765 
2766 	KASSERT(chp->ch_queue->active_xfer != NULL);
2767 	KASSERT(mvport->port_reqtbl[quetag].xfer == NULL);
2768 	mvport->port_reqtbl[quetag].xfer = chp->ch_queue->active_xfer;
2769 
2770 	/* setup EDMA Physical Region Descriptors (ePRD) Table Data */
2771 	data_dmamap = mvport->port_reqtbl[quetag].data_dmamap;
2772 	eprd = mvport->port_reqtbl[quetag].eprd;
2773 	for (i = 0; i < data_dmamap->dm_nsegs; i++) {
2774 		bus_addr_t ds_addr = data_dmamap->dm_segs[i].ds_addr;
2775 		bus_size_t ds_len = data_dmamap->dm_segs[i].ds_len;
2776 
2777 		eprd->prdbal = htole32(ds_addr & EPRD_PRDBAL_MASK);
2778 		eprd->bytecount = htole32(EPRD_BYTECOUNT(ds_len));
2779 		eprd->eot = htole16(0);
2780 		eprd->prdbah = htole32((ds_addr >> 16) >> 16);
2781 		eprd++;
2782 	}
2783 	(eprd - 1)->eot |= htole16(EPRD_EOT);
2784 #ifdef MVSATA_DEBUG
2785 	if (mvsata_debug >= 3)
2786 		mvsata_print_eprd(mvport, quetag);
2787 #endif
2788 	bus_dmamap_sync(mvport->port_dmat, mvport->port_eprd_dmamap,
2789 	    mvport->port_reqtbl[quetag].eprd_offset, MVSATA_EPRD_MAX_SIZE,
2790 	    BUS_DMASYNC_PREWRITE);
2791 	eprd_addr = mvport->port_eprd_dmamap->dm_segs[0].ds_addr +
2792 	    mvport->port_reqtbl[quetag].eprd_offset;
2793 
2794 	MVSATA_EDMA_WRITE_4(mvport, DMA_DTLBA, eprd_addr & DMA_DTLBA_MASK);
2795 	MVSATA_EDMA_WRITE_4(mvport, DMA_DTHBA, (eprd_addr >> 16) >> 16);
2796 
2797 	if (sc_xfer->xs_control & XS_CTL_DATA_IN)
2798 		MVSATA_EDMA_WRITE_4(mvport, DMA_C, DMA_C_READ);
2799 	else
2800 		MVSATA_EDMA_WRITE_4(mvport, DMA_C, 0);
2801 
2802 	return 0;
2803 }
2804 
2805 static void
2806 mvsata_bdma_start(struct mvsata_port *mvport)
2807 {
2808 
2809 #ifdef MVSATA_DEBUG
2810 	if (mvsata_debug >= 3)
2811 		mvsata_print_eprd(mvport, 0);
2812 #endif
2813 
2814 	MVSATA_EDMA_WRITE_4(mvport, DMA_C,
2815 	    MVSATA_EDMA_READ_4(mvport, DMA_C) | DMA_C_START);
2816 }
2817 #endif
2818 #endif
2819 
2820 
2821 static int
2822 mvsata_port_init(struct mvsata_hc *mvhc, int port)
2823 {
2824 	struct mvsata_softc *sc = mvhc->hc_sc;
2825 	struct mvsata_port *mvport;
2826 	struct ata_channel *chp;
2827 	int channel, rv, i;
2828 	const int crqbq_size = sizeof(union mvsata_crqb) * MVSATA_EDMAQ_LEN;
2829 	const int crpbq_size = sizeof(struct crpb) * MVSATA_EDMAQ_LEN;
2830 	const int eprd_buf_size = MVSATA_EPRD_MAX_SIZE * MVSATA_EDMAQ_LEN;
2831 
2832 	mvport = malloc(sizeof(struct mvsata_port), M_DEVBUF,
2833 	    M_ZERO | M_NOWAIT);
2834 	if (mvport == NULL) {
2835 		aprint_error("%s:%d: can't allocate memory for port %d\n",
2836 		    device_xname(MVSATA_DEV(sc)), mvhc->hc, port);
2837 		return ENOMEM;
2838 	}
2839 
2840 	mvport->port = port;
2841 	mvport->port_hc = mvhc;
2842 	mvport->port_edmamode = nodma;
2843 
2844 	rv = bus_space_subregion(mvhc->hc_iot, mvhc->hc_ioh,
2845 	    EDMA_REGISTERS_OFFSET + port * EDMA_REGISTERS_SIZE,
2846 	    EDMA_REGISTERS_SIZE, &mvport->port_ioh);
2847 	if (rv != 0) {
2848 		aprint_error("%s:%d: can't subregion EDMA %d registers\n",
2849 		    device_xname(MVSATA_DEV(sc)), mvhc->hc, port);
2850 		goto fail0;
2851 	}
2852 	mvport->port_iot = mvhc->hc_iot;
2853 	rv = bus_space_subregion(mvport->port_iot, mvport->port_ioh, SATA_SS, 4,
2854 	    &mvport->port_sata_sstatus);
2855 	if (rv != 0) {
2856 		aprint_error("%s:%d:%d: couldn't subregion sstatus regs\n",
2857 		    device_xname(MVSATA_DEV(sc)), mvhc->hc, port);
2858 		goto fail0;
2859 	}
2860 	rv = bus_space_subregion(mvport->port_iot, mvport->port_ioh, SATA_SE, 4,
2861 	    &mvport->port_sata_serror);
2862 	if (rv != 0) {
2863 		aprint_error("%s:%d:%d: couldn't subregion serror regs\n",
2864 		    device_xname(MVSATA_DEV(sc)), mvhc->hc, port);
2865 		goto fail0;
2866 	}
2867 	if (sc->sc_rev == gen1)
2868 		rv = bus_space_subregion(mvhc->hc_iot, mvhc->hc_ioh,
2869 		    SATAHC_I_R02(port), 4, &mvport->port_sata_scontrol);
2870 	else
2871 		rv = bus_space_subregion(mvport->port_iot, mvport->port_ioh,
2872 		    SATA_SC, 4, &mvport->port_sata_scontrol);
2873 	if (rv != 0) {
2874 		aprint_error("%s:%d:%d: couldn't subregion scontrol regs\n",
2875 		    device_xname(MVSATA_DEV(sc)), mvhc->hc, port);
2876 		goto fail0;
2877 	}
2878 	mvport->port_dmat = sc->sc_dmat;
2879 #ifndef MVSATA_WITHOUTDMA
2880 	mvsata_quetag_init(mvport);
2881 #endif
2882 	mvhc->hc_ports[port] = mvport;
2883 
2884 	channel = mvhc->hc * sc->sc_port + port;
2885 	chp = &mvport->port_ata_channel;
2886 	chp->ch_channel = channel;
2887 	chp->ch_atac = &sc->sc_wdcdev.sc_atac;
2888 	chp->ch_queue = &mvport->port_ata_queue;
2889 	sc->sc_ata_channels[channel] = chp;
2890 
2891 	rv = mvsata_wdc_reg_init(mvport, sc->sc_wdcdev.regs + channel);
2892 	if (rv != 0)
2893 		goto fail0;
2894 
2895 	rv = bus_dmamap_create(mvport->port_dmat, crqbq_size, 1, crqbq_size, 0,
2896 	    BUS_DMA_NOWAIT, &mvport->port_crqb_dmamap);
2897 	if (rv != 0) {
2898 		aprint_error(
2899 		    "%s:%d:%d: EDMA CRQB map create failed: error=%d\n",
2900 		    device_xname(MVSATA_DEV(sc)), mvhc->hc, port, rv);
2901 		goto fail0;
2902 	}
2903 	rv = bus_dmamap_create(mvport->port_dmat, crpbq_size, 1, crpbq_size, 0,
2904 	    BUS_DMA_NOWAIT, &mvport->port_crpb_dmamap);
2905 	if (rv != 0) {
2906 		aprint_error(
2907 		    "%s:%d:%d: EDMA CRPB map create failed: error=%d\n",
2908 		    device_xname(MVSATA_DEV(sc)), mvhc->hc, port, rv);
2909 		goto fail1;
2910 	}
2911 	rv = bus_dmamap_create(mvport->port_dmat, eprd_buf_size, 1,
2912 	    eprd_buf_size, 0, BUS_DMA_NOWAIT, &mvport->port_eprd_dmamap);
2913 	if (rv != 0) {
2914 		aprint_error(
2915 		    "%s:%d:%d: EDMA ePRD buffer map create failed: error=%d\n",
2916 		    device_xname(MVSATA_DEV(sc)), mvhc->hc, port, rv);
2917 		goto fail2;
2918 	}
2919 	for (i = 0; i < MVSATA_EDMAQ_LEN; i++) {
2920 		rv = bus_dmamap_create(mvport->port_dmat, MAXPHYS,
2921 		    MAXPHYS / PAGE_SIZE, MAXPHYS, 0, BUS_DMA_NOWAIT,
2922 		    &mvport->port_reqtbl[i].data_dmamap);
2923 		if (rv != 0) {
2924 			aprint_error("%s:%d:%d:"
2925 			    " EDMA data map(%d) create failed: error=%d\n",
2926 			    device_xname(MVSATA_DEV(sc)), mvhc->hc, port, i,
2927 			    rv);
2928 			goto fail3;
2929 		}
2930 	}
2931 
2932 	return 0;
2933 
2934 fail3:
2935 	for (i--; i >= 0; i--)
2936 		bus_dmamap_destroy(mvport->port_dmat,
2937 		    mvport->port_reqtbl[i].data_dmamap);
2938 	bus_dmamap_destroy(mvport->port_dmat, mvport->port_eprd_dmamap);
2939 fail2:
2940 	bus_dmamap_destroy(mvport->port_dmat, mvport->port_crpb_dmamap);
2941 fail1:
2942 	bus_dmamap_destroy(mvport->port_dmat, mvport->port_crqb_dmamap);
2943 fail0:
2944 	return rv;
2945 }
2946 
2947 static int
2948 mvsata_wdc_reg_init(struct mvsata_port *mvport, struct wdc_regs *wdr)
2949 {
2950 	int hc, port, rv, i;
2951 
2952 	hc = mvport->port_hc->hc;
2953 	port = mvport->port;
2954 
2955 	/* Create subregion for Shadow Registers Map */
2956 	rv = bus_space_subregion(mvport->port_iot, mvport->port_ioh,
2957 	    SHADOW_REG_BLOCK_OFFSET, SHADOW_REG_BLOCK_SIZE, &wdr->cmd_baseioh);
2958 	if (rv != 0) {
2959 		aprint_error("%s:%d:%d: couldn't subregion shadow block regs\n",
2960 		    device_xname(MVSATA_DEV2(mvport)), hc, port);
2961 		return rv;
2962 	}
2963 	wdr->cmd_iot = mvport->port_iot;
2964 
2965 	/* Once create subregion for each command registers */
2966 	for (i = 0; i < WDC_NREG; i++) {
2967 		rv = bus_space_subregion(wdr->cmd_iot, wdr->cmd_baseioh,
2968 		    i * 4, sizeof(uint32_t), &wdr->cmd_iohs[i]);
2969 		if (rv != 0) {
2970 			aprint_error("%s:%d:%d: couldn't subregion cmd regs\n",
2971 			    device_xname(MVSATA_DEV2(mvport)), hc, port);
2972 			return rv;
2973 		}
2974 	}
2975 	/* Create subregion for Alternate Status register */
2976 	rv = bus_space_subregion(wdr->cmd_iot, wdr->cmd_baseioh,
2977 	    i * 4, sizeof(uint32_t), &wdr->ctl_ioh);
2978 	if (rv != 0) {
2979 		aprint_error("%s:%d:%d: couldn't subregion cmd regs\n",
2980 		    device_xname(MVSATA_DEV2(mvport)), hc, port);
2981 		return rv;
2982 	}
2983 	wdr->ctl_iot = mvport->port_iot;
2984 
2985 	wdc_init_shadow_regs(&mvport->port_ata_channel);
2986 
2987 	rv = bus_space_subregion(mvport->port_iot, mvport->port_ioh,
2988 	    SATA_SS, sizeof(uint32_t) * 3, &wdr->sata_baseioh);
2989 	if (rv != 0) {
2990 		aprint_error("%s:%d:%d: couldn't subregion SATA regs\n",
2991 		    device_xname(MVSATA_DEV2(mvport)), hc, port);
2992 		return rv;
2993 	}
2994 	wdr->sata_iot = mvport->port_iot;
2995 	rv = bus_space_subregion(mvport->port_iot, mvport->port_ioh,
2996 	    SATA_SC, sizeof(uint32_t), &wdr->sata_control);
2997 	if (rv != 0) {
2998 		aprint_error("%s:%d:%d: couldn't subregion SControl\n",
2999 		    device_xname(MVSATA_DEV2(mvport)), hc, port);
3000 		return rv;
3001 	}
3002 	rv = bus_space_subregion(mvport->port_iot, mvport->port_ioh,
3003 	    SATA_SS, sizeof(uint32_t), &wdr->sata_status);
3004 	if (rv != 0) {
3005 		aprint_error("%s:%d:%d: couldn't subregion SStatus\n",
3006 		    device_xname(MVSATA_DEV2(mvport)), hc, port);
3007 		return rv;
3008 	}
3009 	rv = bus_space_subregion(mvport->port_iot, mvport->port_ioh,
3010 	    SATA_SE, sizeof(uint32_t), &wdr->sata_error);
3011 	if (rv != 0) {
3012 		aprint_error("%s:%d:%d: couldn't subregion SError\n",
3013 		    device_xname(MVSATA_DEV2(mvport)), hc, port);
3014 		return rv;
3015 	}
3016 
3017 	return 0;
3018 }
3019 
3020 
3021 #ifndef MVSATA_WITHOUTDMA
3022 /*
3023  * There are functions to determine Host Queue Tag.
3024  * XXXX: We hope to rotate Tag to facilitate debugging.
3025  */
3026 
/* Reset the rotating queue-tag allocator so the search starts at tag 0. */
static inline void
mvsata_quetag_init(struct mvsata_port *mvport)
{

	mvport->port_quetagidx = 0;
}
3033 
3034 static inline int
3035 mvsata_quetag_get(struct mvsata_port *mvport)
3036 {
3037 	int begin = mvport->port_quetagidx;
3038 
3039 	do {
3040 		if (mvport->port_reqtbl[mvport->port_quetagidx].xfer == NULL) {
3041 			MVSATA_EDMAQ_INC(mvport->port_quetagidx);
3042 			return mvport->port_quetagidx;
3043 		}
3044 		MVSATA_EDMAQ_INC(mvport->port_quetagidx);
3045 	} while (mvport->port_quetagidx != begin);
3046 
3047 	return -1;
3048 }
3049 
/*
 * Release a queue tag.  Freeing is implicit: a tag becomes reusable as
 * soon as its port_reqtbl[].xfer is cleared by the caller, so there is
 * nothing to do here.  Kept as a hook for symmetry with quetag_get().
 */
static inline void
mvsata_quetag_put(struct mvsata_port *mvport, int quetag)
{

	/* nothing */
}
3056 
3057 static void *
3058 mvsata_edma_resource_prepare(struct mvsata_port *mvport, bus_dma_tag_t dmat,
3059 			     bus_dmamap_t *dmamap, size_t size, int write)
3060 {
3061 	bus_dma_segment_t seg;
3062 	int nseg, rv;
3063 	void *kva;
3064 
3065 	rv = bus_dmamem_alloc(dmat, size, PAGE_SIZE, 0, &seg, 1, &nseg,
3066 	    BUS_DMA_NOWAIT);
3067 	if (rv != 0) {
3068 		aprint_error("%s:%d:%d: DMA memory alloc failed: error=%d\n",
3069 		    device_xname(MVSATA_DEV2(mvport)),
3070 		    mvport->port_hc->hc, mvport->port, rv);
3071 		goto fail;
3072 	}
3073 
3074 	rv = bus_dmamem_map(dmat, &seg, nseg, size, &kva, BUS_DMA_NOWAIT);
3075 	if (rv != 0) {
3076 		aprint_error("%s:%d:%d: DMA memory map failed: error=%d\n",
3077 		    device_xname(MVSATA_DEV2(mvport)),
3078 		    mvport->port_hc->hc, mvport->port, rv);
3079 		goto free;
3080 	}
3081 
3082 	rv = bus_dmamap_load(dmat, *dmamap, kva, size, NULL,
3083 	    BUS_DMA_NOWAIT | (write ? BUS_DMA_WRITE : BUS_DMA_READ));
3084 	if (rv != 0) {
3085 		aprint_error("%s:%d:%d: DMA map load failed: error=%d\n",
3086 		    device_xname(MVSATA_DEV2(mvport)),
3087 		    mvport->port_hc->hc, mvport->port, rv);
3088 		goto unmap;
3089 	}
3090 
3091 	if (!write)
3092 		bus_dmamap_sync(dmat, *dmamap, 0, size, BUS_DMASYNC_PREREAD);
3093 
3094 	return kva;
3095 
3096 unmap:
3097 	bus_dmamem_unmap(dmat, kva, size);
3098 free:
3099 	bus_dmamem_free(dmat, &seg, nseg);
3100 fail:
3101 	return NULL;
3102 }
3103 
3104 /* ARGSUSED */
3105 static void
3106 mvsata_edma_resource_purge(struct mvsata_port *mvport, bus_dma_tag_t dmat,
3107 			   bus_dmamap_t dmamap, void *kva)
3108 {
3109 
3110 	bus_dmamap_unload(dmat, dmamap);
3111 	bus_dmamem_unmap(dmat, kva, dmamap->dm_mapsize);
3112 	bus_dmamem_free(dmat, dmamap->dm_segs, dmamap->dm_nsegs);
3113 }
3114 
3115 static int
3116 mvsata_dma_bufload(struct mvsata_port *mvport, int index, void *databuf,
3117 		   size_t datalen, int flags)
3118 {
3119 	int rv, lop, sop;
3120 	bus_dmamap_t data_dmamap = mvport->port_reqtbl[index].data_dmamap;
3121 
3122 	lop = (flags & ATA_READ) ? BUS_DMA_READ : BUS_DMA_WRITE;
3123 	sop = (flags & ATA_READ) ? BUS_DMASYNC_PREREAD : BUS_DMASYNC_PREWRITE;
3124 
3125 	rv = bus_dmamap_load(mvport->port_dmat, data_dmamap, databuf, datalen,
3126 	    NULL, BUS_DMA_NOWAIT | lop);
3127 	if (rv) {
3128 		aprint_error("%s:%d:%d: buffer load failed: error=%d",
3129 		    device_xname(MVSATA_DEV2(mvport)), mvport->port_hc->hc,
3130 		    mvport->port, rv);
3131 		return rv;
3132 	}
3133 	bus_dmamap_sync(mvport->port_dmat, data_dmamap, 0,
3134 	    data_dmamap->dm_mapsize, sop);
3135 
3136 	return 0;
3137 }
3138 
3139 static inline void
3140 mvsata_dma_bufunload(struct mvsata_port *mvport, int index, int flags)
3141 {
3142 	bus_dmamap_t data_dmamap = mvport->port_reqtbl[index].data_dmamap;
3143 
3144 	bus_dmamap_sync(mvport->port_dmat, data_dmamap, 0,
3145 	    data_dmamap->dm_mapsize,
3146 	    (flags & ATA_READ) ? BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE);
3147 	bus_dmamap_unload(mvport->port_dmat, data_dmamap);
3148 }
3149 #endif
3150 
/*
 * Hard-reset one SATA port: pulse the EDMA ATA-reset bit, re-apply the
 * PHY errata fixups and, on GenI parts, allow extra settle time.
 */
static void
mvsata_hreset_port(struct mvsata_port *mvport)
{
	struct mvsata_softc *sc = device_private(MVSATA_DEV2(mvport));

	MVSATA_EDMA_WRITE_4(mvport, EDMA_CMD, EDMA_CMD_EATARST);

	delay(25);		/* allow reset propagation */

	MVSATA_EDMA_WRITE_4(mvport, EDMA_CMD, 0);

	/* Generation-specific PHY fixups (set up at attach time). */
	mvport->_fix_phy_param._fix_phy(mvport);

	if (sc->sc_gen == gen1)
		delay(1000);
}
3167 
/*
 * Full port reset: disable EDMA, hard-reset the port, then reprogram
 * the EDMA configuration and clear/initialize all interrupt, queue
 * pointer and timing registers to a known state.
 */
static void
mvsata_reset_port(struct mvsata_port *mvport)
{
	device_t parent = device_parent(MVSATA_DEV2(mvport));

	MVSATA_EDMA_WRITE_4(mvport, EDMA_CMD, EDMA_CMD_EDSEDMA);

	mvsata_hreset_port(mvport);

	/* PCI and SoC variants require different baseline EDMA configs. */
	if (device_is_a(parent, "pci"))
		MVSATA_EDMA_WRITE_4(mvport, EDMA_CFG,
		    EDMA_CFG_RESERVED | EDMA_CFG_ERDBSZ);
	else	/* SoC */
		MVSATA_EDMA_WRITE_4(mvport, EDMA_CFG,
		    EDMA_CFG_RESERVED | EDMA_CFG_RESERVED2);
	MVSATA_EDMA_WRITE_4(mvport, EDMA_T, 0);
	/* Mask/ack SATA interface interrupts and clear SError. */
	MVSATA_EDMA_WRITE_4(mvport, SATA_SEIM, 0x019c0000);
	MVSATA_EDMA_WRITE_4(mvport, SATA_SE, ~0);
	MVSATA_EDMA_WRITE_4(mvport, SATA_FISIC, 0);
	MVSATA_EDMA_WRITE_4(mvport, EDMA_IEC, 0);
	MVSATA_EDMA_WRITE_4(mvport, EDMA_IEM, 0);
	/* Reset the EDMA request/response queue pointers. */
	MVSATA_EDMA_WRITE_4(mvport, EDMA_REQQBAH, 0);
	MVSATA_EDMA_WRITE_4(mvport, EDMA_REQQIP, 0);
	MVSATA_EDMA_WRITE_4(mvport, EDMA_REQQOP, 0);
	MVSATA_EDMA_WRITE_4(mvport, EDMA_RESQBAH, 0);
	MVSATA_EDMA_WRITE_4(mvport, EDMA_RESQIP, 0);
	MVSATA_EDMA_WRITE_4(mvport, EDMA_RESQOP, 0);
	MVSATA_EDMA_WRITE_4(mvport, EDMA_CMD, 0);
	MVSATA_EDMA_WRITE_4(mvport, EDMA_TC, 0);
	/* Default I/O ready timeout. */
	MVSATA_EDMA_WRITE_4(mvport, EDMA_IORT, 0xbc);
}
3199 
/*
 * Reset per-host-controller state: clear the interrupt coalescing
 * threshold/time registers and the interrupt cause register.
 */
static void
mvsata_reset_hc(struct mvsata_hc *mvhc)
{
#if 0
	uint32_t val;
#endif

	MVSATA_HC_WRITE_4(mvhc, SATAHC_ICT, 0);
	MVSATA_HC_WRITE_4(mvhc, SATAHC_ITT, 0);
	MVSATA_HC_WRITE_4(mvhc, SATAHC_IC, 0);

#if 0	/* XXXX needs? */
	MVSATA_HC_WRITE_4(mvhc, 0x01c, 0);

	/*
	 * Keep the SS during power on and the reference clock bits (reset
	 * sample)
	 */
	val = MVSATA_HC_READ_4(mvhc, 0x020);
	val &= 0x1c1c1c1c;
	val |= 0x03030303;
	/*
	 * NOTE(review): the line below is a READ with a spurious extra
	 * argument and discards `val'; if this block is ever enabled it
	 * presumably should be MVSATA_HC_WRITE_4(mvhc, 0x020, val).
	 */
	MVSATA_HC_READ_4(mvhc, 0x020, 0);
#endif
}
3224 
3225 #define WDCDELAY  100 /* 100 microseconds */
3226 #define WDCNDELAY_RST (WDC_RESET_WAIT * 1000 / WDCDELAY)
3227 
/*
 * ATA software reset via the shadow Device Control register: assert
 * SRST, deassert it, then poll for BSY to clear.  `waitok' selects
 * sleeping (tsleep, 1-tick granularity) versus busy-wait polling.
 *
 * Returns the 32-bit device signature assembled from the SC/LBA
 * registers, or ~0 if the device never cleared BSY within the timeout.
 */
static uint32_t
mvsata_softreset(struct mvsata_port *mvport, int waitok)
{
	uint32_t sig0 = ~0;
	int timeout, nloop;
	uint8_t st0;

	/* Assert SRST (with interrupts disabled), then release it. */
	MVSATA_WDC_WRITE_1(mvport, SRB_CAS, WDCTL_RST | WDCTL_IDS | WDCTL_4BIT);
	delay(10);
	(void) MVSATA_WDC_READ_1(mvport, SRB_FE);
	MVSATA_WDC_WRITE_1(mvport, SRB_CAS, WDCTL_IDS | WDCTL_4BIT);
	delay(10);

	/* Iteration count depends on poll interval: delay() vs 1 tick. */
	if (!waitok)
		nloop = WDCNDELAY_RST;
	else
		nloop = WDC_RESET_WAIT * hz / 1000;

	/* wait for BSY to deassert */
	for (timeout = 0; timeout < nloop; timeout++) {
		st0 = MVSATA_WDC_READ_1(mvport, SRB_CS);

		if ((st0 & WDCS_BSY) == 0) {
			/* Read the signature: count, LBA low/mid/high. */
			sig0 = MVSATA_WDC_READ_1(mvport, SRB_SC) << 0;
			sig0 |= MVSATA_WDC_READ_1(mvport, SRB_LBAL) << 8;
			sig0 |= MVSATA_WDC_READ_1(mvport, SRB_LBAM) << 16;
			sig0 |= MVSATA_WDC_READ_1(mvport, SRB_LBAH) << 24;
			goto out;
		}
		if (!waitok)
			delay(WDCDELAY);
		else
			tsleep(&nloop, PRIBIO, "atarst", 1);
	}

out:
	/* Re-enable interrupts. */
	MVSATA_WDC_WRITE_1(mvport, SRB_CAS, WDCTL_4BIT);
	return sig0;
}
3267 
3268 #ifndef MVSATA_WITHOUTDMA
/*
 * Reset the EDMA queue pointer registers: clear the request queue
 * pointers and point the response (CRPB) queue at its DMA area.
 */
static void
mvsata_edma_reset_qptr(struct mvsata_port *mvport)
{
	const bus_addr_t crpb_addr =
	    mvport->port_crpb_dmamap->dm_segs[0].ds_addr;
	const uint32_t crpb_addr_mask =
	    EDMA_RESQP_ERPQBAP_MASK | EDMA_RESQP_ERPQBA_MASK;

	MVSATA_EDMA_WRITE_4(mvport, EDMA_REQQBAH, 0);
	MVSATA_EDMA_WRITE_4(mvport, EDMA_REQQIP, 0);
	MVSATA_EDMA_WRITE_4(mvport, EDMA_REQQOP, 0);
	/* High half of the CRPB base; (>>16)>>16 avoids UB on 32-bit. */
	MVSATA_EDMA_WRITE_4(mvport, EDMA_RESQBAH, (crpb_addr >> 16) >> 16);
	MVSATA_EDMA_WRITE_4(mvport, EDMA_RESQIP, 0);
	MVSATA_EDMA_WRITE_4(mvport, EDMA_RESQOP, (crpb_addr & crpb_addr_mask));
}
3284 
/* Enable the EDMA engine on this port. */
static inline void
mvsata_edma_enable(struct mvsata_port *mvport)
{

	MVSATA_EDMA_WRITE_4(mvport, EDMA_CMD, EDMA_CMD_EENEDMA);
}
3291 
/*
 * Disable the EDMA engine: wait (up to `timeout' ms) for it to go
 * idle, request the disable, then wait for the enable bit to clear.
 * `waitok' selects tsleep vs delay for the 1 ms poll interval.  Note
 * both waits share the same `timeout' budget.  Returns 0 or EBUSY.
 */
static int
mvsata_edma_disable(struct mvsata_port *mvport, int timeout, int waitok)
{
	uint32_t status, command;
	int ms;

	if (MVSATA_EDMA_READ_4(mvport, EDMA_CMD) & EDMA_CMD_EENEDMA) {
		/* First wait for the engine to become idle. */
		for (ms = 0; ms < timeout; ms++) {
			status = MVSATA_EDMA_READ_4(mvport, EDMA_S);
			if (status & EDMA_S_EDMAIDLE)
				break;
			if (waitok)
				tsleep(&waitok, PRIBIO, "mvsata_edma1",
				    mstohz(1));
			else
				delay(1000);
		}
		if (ms == timeout) {
			aprint_error("%s:%d:%d: unable to stop EDMA\n",
			    device_xname(MVSATA_DEV2(mvport)),
			    mvport->port_hc->hc, mvport->port);
			return EBUSY;
		}

		/* The disable bit (eDsEDMA) is self negated. */
		MVSATA_EDMA_WRITE_4(mvport, EDMA_CMD, EDMA_CMD_EDSEDMA);

		/* Then wait for the enable bit to drop. */
		for ( ; ms < timeout; ms++) {
			command = MVSATA_EDMA_READ_4(mvport, EDMA_CMD);
			if (!(command & EDMA_CMD_EENEDMA))
				break;
			if (waitok)
				tsleep(&waitok, PRIBIO, "mvsata_edma2",
				    mstohz(1));
			else
				delay(1000);
		}
		if (ms == timeout) {
			aprint_error("%s:%d:%d: unable to stop EDMA\n",
			    device_xname(MVSATA_DEV2(mvport)),
			    mvport->port_hc->hc, mvport->port);
			return EBUSY;
		}
	}
	return 0;
}
3338 
3339 /*
3340  * Set EDMA registers according to mode.
3341  *       ex. NCQ/TCQ(queued)/non queued.
3342  */
static void
mvsata_edma_config(struct mvsata_port *mvport, int mode)
{
	struct mvsata_softc *sc = device_private(MVSATA_DEV2(mvport));
	uint32_t reg;

	reg = MVSATA_EDMA_READ_4(mvport, EDMA_CFG);
	reg |= EDMA_CFG_RESERVED;

	/* Select the queueing discipline. */
	if (mode == ncq) {
		if (sc->sc_gen == gen1) {
			aprint_error_dev(MVSATA_DEV2(mvport),
			    "GenI not support NCQ\n");
			return;
		} else if (sc->sc_gen == gen2)
			reg |= EDMA_CFG_EDEVERR;
		reg |= EDMA_CFG_ESATANATVCMDQUE;
	} else if (mode == queued) {
		reg &= ~EDMA_CFG_ESATANATVCMDQUE;
		reg |= EDMA_CFG_EQUE;
	} else
		reg &= ~(EDMA_CFG_ESATANATVCMDQUE | EDMA_CFG_EQUE);

	/* Generation-specific burst/buffer configuration. */
	if (sc->sc_gen == gen1)
		reg |= EDMA_CFG_ERDBSZ;
	else if (sc->sc_gen == gen2)
		reg |= (EDMA_CFG_ERDBSZEXT | EDMA_CFG_EWRBUFFERLEN);
	else if (sc->sc_gen == gen2e) {
		device_t parent = device_parent(MVSATA_DEV(sc));

		reg |= (EDMA_CFG_EMASKRXPM | EDMA_CFG_EHOSTQUEUECACHEEN);
		reg &= ~(EDMA_CFG_EEDMAFBS | EDMA_CFG_EEDMAQUELEN);

		if (device_is_a(parent, "pci"))
			reg |= (
#if NATAPIBUS > 0
			    EDMA_CFG_EEARLYCOMPLETIONEN |
#endif
			    EDMA_CFG_ECUTTHROUGHEN |
			    EDMA_CFG_EWRBUFFERLEN |
			    EDMA_CFG_ERDBSZEXT);
	}
	MVSATA_EDMA_WRITE_4(mvport, EDMA_CFG, reg);

	/* Build the interrupt-enable mask. */
	reg = (
	    EDMA_IE_EIORDYERR |
	    EDMA_IE_ETRANSINT |
	    EDMA_IE_EDEVCON |
	    EDMA_IE_EDEVDIS);
	if (sc->sc_gen != gen1)
		reg |= (
		    EDMA_IE_TRANSPROTERR |
		    EDMA_IE_LINKDATATXERR(EDMA_IE_LINKTXERR_FISTXABORTED) |
		    EDMA_IE_LINKDATATXERR(EDMA_IE_LINKXERR_OTHERERRORS) |
		    EDMA_IE_LINKDATATXERR(EDMA_IE_LINKXERR_LINKLAYERRESET) |
		    EDMA_IE_LINKDATATXERR(EDMA_IE_LINKXERR_INTERNALFIFO) |
		    EDMA_IE_LINKDATATXERR(EDMA_IE_LINKXERR_SATACRC) |
		    EDMA_IE_LINKCTLTXERR(EDMA_IE_LINKXERR_OTHERERRORS) |
		    EDMA_IE_LINKCTLTXERR(EDMA_IE_LINKXERR_LINKLAYERRESET) |
		    EDMA_IE_LINKCTLTXERR(EDMA_IE_LINKXERR_INTERNALFIFO) |
		    EDMA_IE_LINKDATARXERR(EDMA_IE_LINKXERR_OTHERERRORS) |
		    EDMA_IE_LINKDATARXERR(EDMA_IE_LINKXERR_LINKLAYERRESET) |
		    EDMA_IE_LINKDATARXERR(EDMA_IE_LINKXERR_INTERNALFIFO) |
		    EDMA_IE_LINKDATARXERR(EDMA_IE_LINKXERR_SATACRC) |
		    EDMA_IE_LINKCTLRXERR(EDMA_IE_LINKXERR_OTHERERRORS) |
		    EDMA_IE_LINKCTLRXERR(EDMA_IE_LINKXERR_LINKLAYERRESET) |
		    EDMA_IE_LINKCTLRXERR(EDMA_IE_LINKXERR_INTERNALFIFO) |
		    EDMA_IE_LINKCTLRXERR(EDMA_IE_LINKXERR_SATACRC) |
		    EDMA_IE_ESELFDIS);

	/* Device-error interrupt only via EDMA in NCQ mode ... */
	if (mode == ncq)
	    reg |= EDMA_IE_EDEVERR;
	MVSATA_EDMA_WRITE_4(mvport, EDMA_IEM, reg);
	/* ... and only via the halt-condition register otherwise. */
	reg = MVSATA_EDMA_READ_4(mvport, EDMA_HC);
	reg &= ~EDMA_IE_EDEVERR;
	if (mode != ncq)
	    reg |= EDMA_IE_EDEVERR;
	MVSATA_EDMA_WRITE_4(mvport, EDMA_HC, reg);
	if (sc->sc_gen == gen2e) {
		/*
		 * Clear FISWait4HostRdyEn[0] and [2].
		 *   [0]: Device to Host FIS with <ERR> or <DF> bit set to 1.
		 *   [2]: SDB FIS is received with <ERR> bit set to 1.
		 */
		reg = MVSATA_EDMA_READ_4(mvport, SATA_FISC);
		reg &= ~(SATA_FISC_FISWAIT4HOSTRDYEN_B0 |
		    SATA_FISC_FISWAIT4HOSTRDYEN_B2);
		MVSATA_EDMA_WRITE_4(mvport, SATA_FISC, reg);
	}

	mvport->port_edmamode = mode;
}
3435 
3436 
3437 /*
3438  * Generation dependent functions
3439  */
3440 
/*
 * Build a GenI-format CRQB (command request queue block) at request
 * queue slot `erqqip' for the transfer held in queue tag `quetag'.
 * The ATA taskfile is encoded as a sequence of (register, value) word
 * pairs, the last marked with CRQB_ATACOMMAND_LAST.
 */
static void
mvsata_edma_setup_crqb(struct mvsata_port *mvport, int erqqip, int quetag,
		       struct ata_bio  *ata_bio)
{
	struct crqb *crqb;
	bus_addr_t eprd_addr;
	daddr_t blkno;
	uint32_t rw;
	uint8_t cmd, head;
	int i;
	const int drive =
	    mvport->port_ata_channel.ch_queue->active_xfer->c_drive;

	eprd_addr = mvport->port_eprd_dmamap->dm_segs[0].ds_addr +
	    mvport->port_reqtbl[quetag].eprd_offset;
	rw = (ata_bio->flags & ATA_READ) ? CRQB_CDIR_READ : CRQB_CDIR_WRITE;
	cmd = (ata_bio->flags & ATA_READ) ? WDCC_READDMA : WDCC_WRITEDMA;
	if (ata_bio->flags & (ATA_LBA|ATA_LBA48)) {
		head = WDSD_LBA;
	} else {
		head = 0;
	}
	blkno = ata_bio->blkno;
	if (ata_bio->flags & ATA_LBA48)
		cmd = atacmd_to48(cmd);
	else {
		/* LBA28: bits 27:24 of the block number go in the head. */
		head |= ((ata_bio->blkno >> 24) & 0xf);
		blkno &= 0xffffff;
	}
	crqb = &mvport->port_crqb->crqb + erqqip;
	crqb->cprdbl = htole32(eprd_addr & CRQB_CRQBL_EPRD_MASK);
	crqb->cprdbh = htole32((eprd_addr >> 16) >> 16);
	crqb->ctrlflg =
	    htole16(rw | CRQB_CHOSTQUETAG(quetag) | CRQB_CPMPORT(drive));
	i = 0;
	if (mvport->port_edmamode == dma) {
		/* Plain DMA: sector count in the count register(s). */
		if (ata_bio->flags & ATA_LBA48)
			crqb->atacommand[i++] = htole16(CRQB_ATACOMMAND(
			    CRQB_ATACOMMAND_SECTORCOUNT, ata_bio->nblks >> 8));
		crqb->atacommand[i++] = htole16(CRQB_ATACOMMAND(
		    CRQB_ATACOMMAND_SECTORCOUNT, ata_bio->nblks));
	} else { /* ncq/queued */

		/*
		 * XXXX: Oops, ata command is not correct.  And, atabus layer
		 * has not been supported yet now.
		 *   Queued DMA read/write.
		 *   read/write FPDMAQueued.
		 */

		/* Queued: count in FEATURES, tag (<<3) in SECTORCOUNT. */
		if (ata_bio->flags & ATA_LBA48)
			crqb->atacommand[i++] = htole16(CRQB_ATACOMMAND(
			    CRQB_ATACOMMAND_FEATURES, ata_bio->nblks >> 8));
		crqb->atacommand[i++] = htole16(CRQB_ATACOMMAND(
		    CRQB_ATACOMMAND_FEATURES, ata_bio->nblks));
		crqb->atacommand[i++] = htole16(CRQB_ATACOMMAND(
		    CRQB_ATACOMMAND_SECTORCOUNT, quetag << 3));
	}
	if (ata_bio->flags & ATA_LBA48) {
		/* High-order LBA bytes first (previous-value registers). */
		crqb->atacommand[i++] = htole16(CRQB_ATACOMMAND(
		    CRQB_ATACOMMAND_LBALOW, blkno >> 24));
		crqb->atacommand[i++] = htole16(CRQB_ATACOMMAND(
		    CRQB_ATACOMMAND_LBAMID, blkno >> 32));
		crqb->atacommand[i++] = htole16(CRQB_ATACOMMAND(
		    CRQB_ATACOMMAND_LBAHIGH, blkno >> 40));
	}
	crqb->atacommand[i++] =
	    htole16(CRQB_ATACOMMAND(CRQB_ATACOMMAND_LBALOW, blkno));
	crqb->atacommand[i++] =
	    htole16(CRQB_ATACOMMAND(CRQB_ATACOMMAND_LBAMID, blkno >> 8));
	crqb->atacommand[i++] =
	    htole16(CRQB_ATACOMMAND(CRQB_ATACOMMAND_LBAHIGH, blkno >> 16));
	crqb->atacommand[i++] =
	    htole16(CRQB_ATACOMMAND(CRQB_ATACOMMAND_DEVICE, head));
	/* The command register write is last and triggers execution. */
	crqb->atacommand[i++] = htole16(
	    CRQB_ATACOMMAND(CRQB_ATACOMMAND_COMMAND, cmd) |
	    CRQB_ATACOMMAND_LAST);
}
3519 #endif
3520 
3521 static uint32_t
3522 mvsata_read_preamps_gen1(struct mvsata_port *mvport)
3523 {
3524 	struct mvsata_hc *hc = mvport->port_hc;
3525 	uint32_t reg;
3526 
3527 	reg = MVSATA_HC_READ_4(hc, SATAHC_I_PHYMODE(mvport->port));
3528 	/*
3529 	 * [12:11] : pre
3530 	 * [7:5]   : amps
3531 	 */
3532 	return reg & 0x000018e0;
3533 }
3534 
/*
 * Apply GenI (88SX50xx) PHY errata fixups after a reset, then restore
 * the saved pre-emphasis/amplitude settings.
 */
static void
mvsata_fix_phy_gen1(struct mvsata_port *mvport)
{
	struct mvsata_softc *sc = device_private(MVSATA_DEV2(mvport));
	struct mvsata_hc *mvhc = mvport->port_hc;
	uint32_t reg;
	int port = mvport->port, fix_apm_sq = 0;

	/* The affected silicon revision differs between 5080 and others. */
	if (sc->sc_model == PCI_PRODUCT_MARVELL_88SX5080) {
		if (sc->sc_rev == 0x01)
			fix_apm_sq = 1;
	} else {
		if (sc->sc_rev == 0x00)
			fix_apm_sq = 1;
	}

	if (fix_apm_sq) {
		/*
		 * Disable auto-power management
		 *   88SX50xx FEr SATA#12
		 */
		reg = MVSATA_HC_READ_4(mvhc, SATAHC_I_LTMODE(port));
		reg |= (1 << 19);
		MVSATA_HC_WRITE_4(mvhc, SATAHC_I_LTMODE(port), reg);

		/*
		 * Fix squelch threshold
		 *   88SX50xx FEr SATA#9
		 */
		reg = MVSATA_HC_READ_4(mvhc, SATAHC_I_PHYCONTROL(port));
		reg &= ~0x3;
		reg |= 0x1;
		MVSATA_HC_WRITE_4(mvhc, SATAHC_I_PHYCONTROL(port), reg);
	}

	/* Revert values of pre-emphasis and signal amps to the saved ones */
	reg = MVSATA_HC_READ_4(mvhc, SATAHC_I_PHYMODE(port));
	reg &= ~0x000018e0;	/* pre and amps mask */
	reg |= mvport->_fix_phy_param.pre_amps;
	MVSATA_HC_WRITE_4(mvhc, SATAHC_I_PHYMODE(port), reg);
}
3576 
/*
 * Device-connect hook for GenI controllers: re-run the PHY fixups
 * (88SX50xx FEr SATA#2).  The activity-LED enable for rev 0x03 is
 * still unimplemented.
 */
static void
mvsata_devconn_gen1(struct mvsata_port *mvport)
{
	struct mvsata_softc *sc = device_private(MVSATA_DEV2(mvport));

	/* Fix for 88SX50xx FEr SATA#2 */
	mvport->_fix_phy_param._fix_phy(mvport);

	/* If disk is connected, then enable the activity LED */
	if (sc->sc_rev == 0x03) {
		/* XXXXX */
	}
}
3590 
3591 static uint32_t
3592 mvsata_read_preamps_gen2(struct mvsata_port *mvport)
3593 {
3594 	uint32_t reg;
3595 
3596 	reg = MVSATA_EDMA_READ_4(mvport, SATA_PHYM2);
3597 	/*
3598 	 * [10:8] : amps
3599 	 * [7:5]  : pre
3600 	 */
3601 	return reg & 0x000007e0;
3602 }
3603 
3604 static void
3605 mvsata_fix_phy_gen2(struct mvsata_port *mvport)
3606 {
3607 	struct mvsata_softc *sc = device_private(MVSATA_DEV2(mvport));
3608 	uint32_t reg;
3609 
3610 	if ((sc->sc_gen == gen2 && sc->sc_rev == 0x07) ||
3611 	    sc->sc_gen == gen2e) {
3612 		/*
3613 		 * Fix for
3614 		 *   88SX60X1 FEr SATA #23
3615 		 *   88SX6042/88SX7042 FEr SATA #23
3616 		 *   88F5182 FEr #SATA-S13
3617 		 *   88F5082 FEr #SATA-S13
3618 		 */
3619 		reg = MVSATA_EDMA_READ_4(mvport, SATA_PHYM2);
3620 		reg &= ~(1 << 16);
3621 		reg |= (1 << 31);
3622 		MVSATA_EDMA_WRITE_4(mvport, SATA_PHYM2, reg);
3623 
3624 		delay(200);
3625 
3626 		reg = MVSATA_EDMA_READ_4(mvport, SATA_PHYM2);
3627 		reg &= ~((1 << 16) | (1 << 31));
3628 		MVSATA_EDMA_WRITE_4(mvport, SATA_PHYM2, reg);
3629 
3630 		delay(200);
3631 	}
3632 
3633 	/* Fix values in PHY Mode 3 Register.*/
3634 	reg = MVSATA_EDMA_READ_4(mvport, SATA_PHYM3);
3635 	reg &= ~0x7F900000;
3636 	reg |= 0x2A800000;
3637 	/* Implement Guidline 88F5182, 88F5082, 88F6082 (GL# SATA-S11) */
3638 	if (sc->sc_model == PCI_PRODUCT_MARVELL_88F5082 ||
3639 	    sc->sc_model == PCI_PRODUCT_MARVELL_88F5182 ||
3640 	    sc->sc_model == PCI_PRODUCT_MARVELL_88F6082)
3641 		reg &= ~0x0000001c;
3642 	MVSATA_EDMA_WRITE_4(mvport, SATA_PHYM3, reg);
3643 
3644 	/*
3645 	 * Fix values in PHY Mode 4 Register.
3646 	 *   88SX60x1 FEr SATA#10
3647 	 *   88F5182 GL #SATA-S10
3648 	 *   88F5082 GL #SATA-S10
3649 	 */
3650 	if ((sc->sc_gen == gen2 && sc->sc_rev == 0x07) ||
3651 	    sc->sc_gen == gen2e) {
3652 		uint32_t tmp = 0;
3653 
3654 		/* 88SX60x1 FEr SATA #13 */
3655 		if (sc->sc_gen == 2 && sc->sc_rev == 0x07)
3656 			tmp = MVSATA_EDMA_READ_4(mvport, SATA_PHYM3);
3657 
3658 		reg = MVSATA_EDMA_READ_4(mvport, SATA_PHYM4);
3659 		reg |= (1 << 0);
3660 		reg &= ~(1 << 1);
3661 		/* PHY Mode 4 Register of Gen IIE has some restriction */
3662 		if (sc->sc_gen == gen2e) {
3663 			reg &= ~0x5de3fffc;
3664 			reg |= (1 << 2);
3665 		}
3666 		MVSATA_EDMA_WRITE_4(mvport, SATA_PHYM4, reg);
3667 
3668 		/* 88SX60x1 FEr SATA #13 */
3669 		if (sc->sc_gen == 2 && sc->sc_rev == 0x07)
3670 			MVSATA_EDMA_WRITE_4(mvport, SATA_PHYM3, tmp);
3671 	}
3672 
3673 	/* Revert values of pre-emphasis and signal amps to the saved ones */
3674 	reg = MVSATA_EDMA_READ_4(mvport, SATA_PHYM2);
3675 	reg &= ~0x000007e0;	/* pre and amps mask */
3676 	reg |= mvport->_fix_phy_param.pre_amps;
3677 	reg &= ~(1 << 16);
3678 	if (sc->sc_gen == gen2e) {
3679 		/*
3680 		 * according to mvSata 3.6.1, some IIE values are fixed.
3681 		 * some reserved fields must be written with fixed values.
3682 		 */
3683 		reg &= ~0xC30FF01F;
3684 		reg |= 0x0000900F;
3685 	}
3686 	MVSATA_EDMA_WRITE_4(mvport, SATA_PHYM2, reg);
3687 }
3688 
3689 #ifndef MVSATA_WITHOUTDMA
3690 static void
3691 mvsata_edma_setup_crqb_gen2e(struct mvsata_port *mvport, int erqqip, int quetag,
3692 			     struct ata_bio  *ata_bio)
3693 {
3694 	struct crqb_gen2e *crqb;
3695 	bus_addr_t eprd_addr;
3696 	daddr_t blkno;
3697 	uint32_t ctrlflg, rw;
3698 	uint8_t cmd, head;
3699 	const int drive =
3700 	    mvport->port_ata_channel.ch_queue->active_xfer->c_drive;
3701 
3702 	eprd_addr = mvport->port_eprd_dmamap->dm_segs[0].ds_addr +
3703 	    mvport->port_reqtbl[quetag].eprd_offset;
3704 	rw = (ata_bio->flags & ATA_READ) ? CRQB_CDIR_READ : CRQB_CDIR_WRITE;
3705 	ctrlflg = (rw | CRQB_CDEVICEQUETAG(0) | CRQB_CPMPORT(drive) |
3706 	    CRQB_CPRDMODE_EPRD | CRQB_CHOSTQUETAG_GEN2(quetag));
3707 	cmd = (ata_bio->flags & ATA_READ) ? WDCC_READDMA : WDCC_WRITEDMA;
3708 	if (ata_bio->flags & (ATA_LBA|ATA_LBA48)) {
3709 		head = WDSD_LBA;
3710 	} else {
3711 		head = 0;
3712 	}
3713 	blkno = ata_bio->blkno;
3714 	if (ata_bio->flags & ATA_LBA48)
3715 		cmd = atacmd_to48(cmd);
3716 	else {
3717 		head |= ((ata_bio->blkno >> 24) & 0xf);
3718 		blkno &= 0xffffff;
3719 	}
3720 	crqb = &mvport->port_crqb->crqb_gen2e + erqqip;
3721 	crqb->cprdbl = htole32(eprd_addr & CRQB_CRQBL_EPRD_MASK);
3722 	crqb->cprdbh = htole32((eprd_addr >> 16) >> 16);
3723 	crqb->ctrlflg = htole32(ctrlflg);
3724 	if (mvport->port_edmamode == dma) {
3725 		crqb->atacommand[0] = htole32(cmd << 16);
3726 		crqb->atacommand[1] = htole32((blkno & 0xffffff) | head << 24);
3727 		crqb->atacommand[2] = htole32(((blkno >> 24) & 0xffffff));
3728 		crqb->atacommand[3] = htole32(ata_bio->nblks & 0xffff);
3729 	} else { /* ncq/queued */
3730 
3731 		/*
3732 		 * XXXX: Oops, ata command is not correct.  And, atabus layer
3733 		 * has not been supported yet now.
3734 		 *   Queued DMA read/write.
3735 		 *   read/write FPDMAQueued.
3736 		 */
3737 
3738 		crqb->atacommand[0] = htole32(
3739 		    (cmd << 16) | ((ata_bio->nblks & 0xff) << 24));
3740 		crqb->atacommand[1] = htole32((blkno & 0xffffff) | head << 24);
3741 		crqb->atacommand[2] = htole32(((blkno >> 24) & 0xffffff) |
3742 		    ((ata_bio->nblks >> 8) & 0xff));
3743 		crqb->atacommand[3] = htole32(ata_bio->nblks & 0xffff);
3744 		crqb->atacommand[3] = htole32(quetag << 3);
3745 	}
3746 }
3747 
3748 
#ifdef MVSATA_DEBUG
/*
 * MVSATA_DEBUG_PRINT(type, size, n, p):
 * Hex-dump (size) bytes starting at (p), 16 bytes per output row.
 * The dump is prefixed with the stringified structure name and the
 * index (n), e.g. "crqb(3)".  (type) is used only via the # operator,
 * so it need not be an in-scope identifier.
 */
#define MVSATA_DEBUG_PRINT(type, size, n, p)		\
	do {						\
		int _i;					\
		u_char *_p = (p);			\
							\
		printf(#type "(%d)", (n));		\
		for (_i = 0; _i < (size); _i++, _p++) {	\
			if (_i % 16 == 0)		\
				printf("\n   ");	\
			printf(" %02x", *_p);		\
		}					\
		printf("\n");				\
	} while (0 /* CONSTCOND */)
3763 
3764 static void
3765 mvsata_print_crqb(struct mvsata_port *mvport, int n)
3766 {
3767 
3768 	MVSATA_DEBUG_PRINT(crqb, sizeof(union mvsata_crqb),
3769 	    n, (u_char *)(mvport->port_crqb + n));
3770 }
3771 
3772 static void
3773 mvsata_print_crpb(struct mvsata_port *mvport, int n)
3774 {
3775 
3776 	MVSATA_DEBUG_PRINT(crpb, sizeof(struct crpb),
3777 	    n, (u_char *)(mvport->port_crpb + n));
3778 }
3779 
3780 static void
3781 mvsata_print_eprd(struct mvsata_port *mvport, int n)
3782 {
3783 	struct eprd *eprd;
3784 	int i = 0;
3785 
3786 	eprd = mvport->port_reqtbl[n].eprd;
3787 	while (1 /*CONSTCOND*/) {
3788 		MVSATA_DEBUG_PRINT(eprd, sizeof(struct eprd),
3789 		    i, (u_char *)eprd);
3790 		if (eprd->eot & EPRD_EOT)
3791 			break;
3792 		eprd++;
3793 		i++;
3794 	}
3795 }
3796 #endif
3797 #endif
3798