/*	$NetBSD: cmdide.c,v 1.43 2017/10/22 13:13:55 jdolecek Exp $	*/

/*
 * Copyright (c) 1999, 2000, 2001 Manuel Bouyer.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: cmdide.c,v 1.43 2017/10/22 13:13:55 jdolecek Exp $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/atomic.h>

#include <dev/pci/pcivar.h>
#include <dev/pci/pcidevs.h>
#include <dev/pci/pciidereg.h>
#include <dev/pci/pciidevar.h>
#include <dev/pci/pciide_cmd_reg.h>

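/*
 * Sentinel for sc_cmd_act_channel: no channel currently owns the shared
 * CMD064x hardware.
 */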
#define CMDIDE_ACT_CHANNEL_NONE	0xff

static int  cmdide_match(device_t, cfdata_t, void *);
static void cmdide_attach(device_t, device_t, void *);

CFATTACH_DECL_NEW(cmdide, sizeof(struct pciide_softc),
    cmdide_match, cmdide_attach, pciide_detach, NULL);

static void cmd_chip_map(struct pciide_softc*, const struct pci_attach_args*);
static void cmd0643_9_chip_map(struct pciide_softc*,
			       const struct pci_attach_args*);
static void cmd0643_9_setup_channel(struct ata_channel*);
static void cmd_channel_map(const struct pci_attach_args *,
			    struct pciide_softc *, int);
static int cmd064x_claim_hw(struct ata_channel *, int);
static void cmd064x_free_hw(struct ata_channel *);
static int  cmd_pci_intr(void *);
static void cmd646_9_irqack(struct ata_channel *);
static void cmd680_chip_map(struct pciide_softc*,
			    const struct pci_attach_args*);
static void cmd680_setup_channel(struct ata_channel*);
static void cmd680_channel_map(const struct pci_attach_args *,
			       struct pciide_softc *, int);

/* Older CMD64X doesn't have independent channels */
static const struct pciide_product_desc pciide_cmd_products[] = {
	{ PCI_PRODUCT_CMDTECH_640,
	  IDE_SHARED_CHANNELS,
	  "CMD Technology PCI0640",
	  cmd_chip_map
	},
	{ PCI_PRODUCT_CMDTECH_643,
	  IDE_SHARED_CHANNELS,
	  "CMD Technology PCI0643",
	  cmd0643_9_chip_map,
	},
	{ PCI_PRODUCT_CMDTECH_646,
	  IDE_SHARED_CHANNELS,
	  "CMD Technology PCI0646",
	  cmd0643_9_chip_map,
	},
	{ PCI_PRODUCT_CMDTECH_648,
	  IDE_SHARED_CHANNELS,
	  "CMD Technology PCI0648",
	  cmd0643_9_chip_map,
	},
	{ PCI_PRODUCT_CMDTECH_649,
	  0,
	  "CMD Technology PCI0649",
	  cmd0643_9_chip_map,
	},
	{ PCI_PRODUCT_CMDTECH_680,
	  0,
	  "Silicon Image 0680",
	  cmd680_chip_map,
	},
	{ 0,
	  0,
	  NULL,
	  NULL
	}
};

static int
cmdide_match(device_t parent, cfdata_t match, void *aux)
{
	struct pci_attach_args *pa = aux;

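	/*
	 * Return 2 so this match is preferred over the generic pciide(4)
	 * match, which returns a lower priority.
	 */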
	if (PCI_VENDOR(pa->pa_id) == PCI_VENDOR_CMDTECH) {
		if (pciide_lookup_product(pa->pa_id, pciide_cmd_products))
			return (2);
	}
	return (0);
}

static void
cmdide_attach(device_t parent, device_t self, void *aux)
{
	struct pci_attach_args *pa = aux;
	struct pciide_softc *sc = device_private(self);

	sc->sc_wdcdev.sc_atac.atac_dev = self;

	pciide_common_attach(sc, pa,
	    pciide_lookup_product(pa->pa_id, pciide_cmd_products));

}

static void
cmd_channel_map(const struct pci_attach_args *pa, struct pciide_softc *sc,
    int channel)
{
	struct pciide_channel *cp = &sc->pciide_channels[channel];
	u_int8_t ctrl = pciide_pci_read(sc->sc_pc, sc->sc_tag, CMD_CTRL);
	int interface;
	bool one_channel = ISSET(sc->sc_pp->ide_flags, IDE_SHARED_CHANNELS);

	/*
	 * The 0648/0649 can be told to identify as a RAID controller.
	 * In this case, we have to fake the interface.
	 */
	if (PCI_SUBCLASS(pa->pa_class) != PCI_SUBCLASS_MASS_STORAGE_IDE) {
		interface = PCIIDE_INTERFACE_SETTABLE(0) |
		    PCIIDE_INTERFACE_SETTABLE(1);
		if (pciide_pci_read(pa->pa_pc, pa->pa_tag, CMD_CONF) &
		    CMD_CONF_DSA1)
			interface |= PCIIDE_INTERFACE_PCI(0) |
			    PCIIDE_INTERFACE_PCI(1);
	} else {
		interface = PCI_INTERFACE(pa->pa_class);
	}

	sc->wdc_chanarray[channel] = &cp->ata_channel;
	cp->name = PCIIDE_CHANNEL_NAME(channel);
	cp->ata_channel.ch_channel = channel;
	cp->ata_channel.ch_atac = &sc->sc_wdcdev.sc_atac;

	if (channel > 0 && one_channel) {
		/* Channels are not independent; they need synchronization */
		sc->sc_wdcdev.sc_atac.atac_claim_hw = cmd064x_claim_hw;
		sc->sc_wdcdev.sc_atac.atac_free_hw  = cmd064x_free_hw;
		sc->sc_cmd_act_channel = CMDIDE_ACT_CHANNEL_NONE;
	}

	aprint_normal_dev(sc->sc_wdcdev.sc_atac.atac_dev,
	    "%s channel %s to %s mode%s\n", cp->name,
	    (interface & PCIIDE_INTERFACE_SETTABLE(channel)) ?
	    "configured" : "wired",
	    (interface & PCIIDE_INTERFACE_PCI(channel)) ?
	    "native-PCI" : "compatibility",
	    one_channel ? ", channel non-independent" : "");

	/*
	 * With a CMD PCI64x, if we get here, the first channel is enabled:
	 * there's no way to disable the first channel without disabling
	 * the whole device.
	 */
	if (channel != 0 && (ctrl & CMD_CTRL_2PORT) == 0) {
		aprint_normal_dev(sc->sc_wdcdev.sc_atac.atac_dev,
		    "%s channel ignored (disabled)\n", cp->name);
		cp->ata_channel.ch_flags |= ATACH_DISABLED;
		return;
	}

	pciide_mapchan(pa, cp, interface, cmd_pci_intr);
}

/*
 * Check if we can execute the next xfer on the channel.
 * Called with the chp channel lock held.
 */
static int
cmd064x_claim_hw(struct ata_channel *chp, int maysleep)
{
	struct pciide_softc *sc = CHAN_TO_PCIIDE(chp);

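	/*
	 * Atomically claim the shared hardware: the CAS succeeds only if
	 * no channel is currently active.  The maysleep argument is unused
	 * since we never block here.
	 */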
	return atomic_cas_uint(&sc->sc_cmd_act_channel,
	    CMDIDE_ACT_CHANNEL_NONE, chp->ch_channel)
	    == CMDIDE_ACT_CHANNEL_NONE;
}

/* Allow another channel to run. Called with ochp channel lock held. */
static void
cmd064x_free_hw(struct ata_channel *ochp)
{
	struct pciide_softc *sc = CHAN_TO_PCIIDE(ochp);
	uint oact = atomic_cas_uint(&sc->sc_cmd_act_channel,
	    ochp->ch_channel, CMDIDE_ACT_CHANNEL_NONE);
	struct ata_channel *nchp;

	KASSERT(oact == ochp->ch_channel);

	/* Start the other channel(s) */
	for (uint i = 0; i < sc->sc_wdcdev.sc_atac.atac_nchannels; i++) {
		/* Skip the current channel */
		if (i == oact)
			continue;

		nchp = &sc->pciide_channels[i].ata_channel;
		if (nchp->ch_ndrives == 0)
			continue;

		atastart(nchp);
	}
}

static int
cmd_pci_intr(void *arg)
{
	struct pciide_softc *sc = arg;
	struct pciide_channel *cp;
	struct ata_channel *wdc_cp;
	int i, rv, crv;
	u_int32_t priirq, secirq;

	rv = 0;
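	/*
	 * Interrupt status for the primary channel is reported in CMD_CONF,
	 * that for the secondary channel in CMD_ARTTIM23.
	 */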
	priirq = pciide_pci_read(sc->sc_pc, sc->sc_tag, CMD_CONF);
	secirq = pciide_pci_read(sc->sc_pc, sc->sc_tag, CMD_ARTTIM23);
	for (i = 0; i < sc->sc_wdcdev.sc_atac.atac_nchannels; i++) {
		cp = &sc->pciide_channels[i];
		wdc_cp = &cp->ata_channel;
		/* If a compat channel, skip. */
		if (cp->compat)
			continue;
		if ((i == 0 && (priirq & CMD_CONF_DRV0_INTR)) ||
		    (i == 1 && (secirq & CMD_ARTTIM23_IRQ))) {
			crv = wdcintr(wdc_cp);
			if (crv == 0) {
				aprint_error("%s:%d: bogus intr\n",
				    device_xname(
				      sc->sc_wdcdev.sc_atac.atac_dev), i);
				sc->sc_wdcdev.irqack(wdc_cp);
			} else
				rv = 1;
		}
	}
	return rv;
}

static void
cmd_chip_map(struct pciide_softc *sc, const struct pci_attach_args *pa)
{
	int channel;

	/*
	 * For a CMD PCI064x, the use of PCI_COMMAND_IO_ENABLE
	 * and the base address registers can be disabled at the
	 * hardware level. In this case, the device is wired
	 * in compat mode and its first channel is always enabled,
	 * but we can't rely on PCI_COMMAND_IO_ENABLE.
	 * In fact, it seems that the first channel of the CMD PCI0640
	 * can't be disabled.
	 */

#ifdef PCIIDE_CMD064x_DISABLE
	if (pciide_chipen(sc, pa) == 0)
		return;
#endif

	aprint_normal_dev(sc->sc_wdcdev.sc_atac.atac_dev,
	    "hardware does not support DMA\n");
	sc->sc_dma_ok = 0;

	sc->sc_wdcdev.sc_atac.atac_channels = sc->wdc_chanarray;
	sc->sc_wdcdev.sc_atac.atac_nchannels = PCIIDE_NUM_CHANNELS;
	sc->sc_wdcdev.sc_atac.atac_cap = ATAC_CAP_DATA16;
	sc->sc_wdcdev.wdc_maxdrives = 2;

	wdc_allocate_regs(&sc->sc_wdcdev);

	for (channel = 0; channel < sc->sc_wdcdev.sc_atac.atac_nchannels;
	     channel++) {
		cmd_channel_map(pa, sc, channel);
	}
}

static void
cmd0643_9_chip_map(struct pciide_softc *sc, const struct pci_attach_args *pa)
{
	int channel;
	pcireg_t rev = PCI_REVISION(pa->pa_class);

	/*
	 * For a CMD PCI064x, the use of PCI_COMMAND_IO_ENABLE
	 * and the base address registers can be disabled at the
	 * hardware level. In this case, the device is wired
	 * in compat mode and its first channel is always enabled,
	 * but we can't rely on PCI_COMMAND_IO_ENABLE.
	 * In fact, it seems that the first channel of the CMD PCI0640
	 * can't be disabled.
	 */

#ifdef PCIIDE_CMD064x_DISABLE
	if (pciide_chipen(sc, pa) == 0)
		return;
#endif

	aprint_verbose_dev(sc->sc_wdcdev.sc_atac.atac_dev,
	    "bus-master DMA support present");
	pciide_mapreg_dma(sc, pa);
	aprint_verbose("\n");
	sc->sc_wdcdev.sc_atac.atac_cap = ATAC_CAP_DATA16 | ATAC_CAP_DATA32;
	if (sc->sc_dma_ok) {
		sc->sc_wdcdev.sc_atac.atac_cap |= ATAC_CAP_DMA;
		switch (sc->sc_pp->ide_product) {
		case PCI_PRODUCT_CMDTECH_649:
			sc->sc_wdcdev.sc_atac.atac_cap |= ATAC_CAP_UDMA;
			sc->sc_wdcdev.sc_atac.atac_udma_cap = 5;
			sc->sc_wdcdev.irqack = cmd646_9_irqack;
			break;
		case PCI_PRODUCT_CMDTECH_648:
			sc->sc_wdcdev.sc_atac.atac_cap |= ATAC_CAP_UDMA;
			sc->sc_wdcdev.sc_atac.atac_udma_cap = 4;
			sc->sc_wdcdev.irqack = cmd646_9_irqack;
			break;
		case PCI_PRODUCT_CMDTECH_646:
			if (rev >= CMD0646U2_REV) {
				sc->sc_wdcdev.sc_atac.atac_cap |= ATAC_CAP_UDMA;
				sc->sc_wdcdev.sc_atac.atac_udma_cap = 2;
			} else if (rev >= CMD0646U_REV) {
				/*
				 * Linux's driver claims that the 646U is
				 * broken with UDMA. Only enable it if we
				 * know what we're doing.
				 */
#ifdef PCIIDE_CMD0646U_ENABLEUDMA
				sc->sc_wdcdev.sc_atac.atac_cap |= ATAC_CAP_UDMA;
				sc->sc_wdcdev.sc_atac.atac_udma_cap = 2;
#endif
				/* explicitly disable UDMA */
				pciide_pci_write(sc->sc_pc, sc->sc_tag,
				    CMD_UDMATIM(0), 0);
				pciide_pci_write(sc->sc_pc, sc->sc_tag,
				    CMD_UDMATIM(1), 0);
			}
			sc->sc_wdcdev.irqack = cmd646_9_irqack;
			break;
		default:
			sc->sc_wdcdev.irqack = pciide_irqack;
		}
	}

	sc->sc_wdcdev.sc_atac.atac_channels = sc->wdc_chanarray;
	sc->sc_wdcdev.sc_atac.atac_nchannels = PCIIDE_NUM_CHANNELS;
	sc->sc_wdcdev.sc_atac.atac_pio_cap = 4;
	sc->sc_wdcdev.sc_atac.atac_dma_cap = 2;
	sc->sc_wdcdev.sc_atac.atac_set_modes = cmd0643_9_setup_channel;
	sc->sc_wdcdev.wdc_maxdrives = 2;

	ATADEBUG_PRINT(("cmd0643_9_chip_map: old timings reg 0x%x 0x%x\n",
		pci_conf_read(sc->sc_pc, sc->sc_tag, 0x54),
		pci_conf_read(sc->sc_pc, sc->sc_tag, 0x58)),
		DEBUG_PROBE);

	wdc_allocate_regs(&sc->sc_wdcdev);

	for (channel = 0; channel < sc->sc_wdcdev.sc_atac.atac_nchannels;
	     channel++)
		cmd_channel_map(pa, sc, channel);

	/*
	 * Note: this also makes sure we clear the IRQ disable and reset
	 * bits.
	 */
	pciide_pci_write(sc->sc_pc, sc->sc_tag, CMD_DMA_MODE, CMD_DMA_MULTIPLE);
	ATADEBUG_PRINT(("cmd0643_9_chip_map: timings reg now 0x%x 0x%x\n",
	    pci_conf_read(sc->sc_pc, sc->sc_tag, 0x54),
	    pci_conf_read(sc->sc_pc, sc->sc_tag, 0x58)),
	    DEBUG_PROBE);
}

static void
cmd0643_9_setup_channel(struct ata_channel *chp)
{
	struct ata_drive_datas *drvp;
	u_int8_t tim;
	u_int32_t idedma_ctl, udma_reg;
	int drive, s;
	struct pciide_channel *cp = CHAN_TO_PCHAN(chp);
	struct pciide_softc *sc = CHAN_TO_PCIIDE(chp);

	idedma_ctl = 0;
	/* setup DMA if needed */
	pciide_channel_dma_setup(cp);

	for (drive = 0; drive < 2; drive++) {
		drvp = &chp->ch_drive[drive];
		/* If no drive, skip */
		if (drvp->drive_type == ATA_DRIVET_NONE)
			continue;
		/* add timing values, setup DMA if needed */
		tim = cmd0643_9_data_tim_pio[drvp->PIO_mode];
		if (drvp->drive_flags & (ATA_DRIVE_DMA | ATA_DRIVE_UDMA)) {
			if (drvp->drive_flags & ATA_DRIVE_UDMA) {
				/* UltraDMA on a 646U2, 0648 or 0649 */
				s = splbio();
				drvp->drive_flags &= ~ATA_DRIVE_DMA;
				splx(s);
				udma_reg = pciide_pci_read(sc->sc_pc,
				    sc->sc_tag, CMD_UDMATIM(chp->ch_channel));
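				/*
				 * Cap at UDMA2 (Ultra/33) when CMD_BICSR does
				 * not report an 80-conductor cable for this
				 * channel.
				 */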
				if (drvp->UDMA_mode > 2 &&
				    (pciide_pci_read(sc->sc_pc, sc->sc_tag,
				    CMD_BICSR) &
				    CMD_BICSR_80(chp->ch_channel)) == 0)
					drvp->UDMA_mode = 2;
				if (drvp->UDMA_mode > 2)
					udma_reg &= ~CMD_UDMATIM_UDMA33(drive);
				else if (sc->sc_wdcdev.sc_atac.atac_udma_cap > 2)
					udma_reg |= CMD_UDMATIM_UDMA33(drive);
				udma_reg |= CMD_UDMATIM_UDMA(drive);
				udma_reg &= ~(CMD_UDMATIM_TIM_MASK <<
				    CMD_UDMATIM_TIM_OFF(drive));
				udma_reg |=
				    (cmd0646_9_tim_udma[drvp->UDMA_mode] <<
				    CMD_UDMATIM_TIM_OFF(drive));
				pciide_pci_write(sc->sc_pc, sc->sc_tag,
				    CMD_UDMATIM(chp->ch_channel), udma_reg);
			} else {
				/*
				 * Use Multiword DMA.
				 * Timings will be used for both PIO and DMA,
				 * so adjust the DMA mode if needed.  If we
				 * have a 0646U2/8/9, turn off UDMA.
				 */
				if (sc->sc_wdcdev.sc_atac.atac_cap & ATAC_CAP_UDMA) {
					udma_reg = pciide_pci_read(sc->sc_pc,
					    sc->sc_tag,
					    CMD_UDMATIM(chp->ch_channel));
					udma_reg &= ~CMD_UDMATIM_UDMA(drive);
					pciide_pci_write(sc->sc_pc, sc->sc_tag,
					    CMD_UDMATIM(chp->ch_channel),
					    udma_reg);
				}
				if (drvp->PIO_mode >= 3 &&
				    (drvp->DMA_mode + 2) > drvp->PIO_mode) {
					drvp->DMA_mode = drvp->PIO_mode - 2;
				}
				tim = cmd0643_9_data_tim_dma[drvp->DMA_mode];
			}
			idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
		}
		pciide_pci_write(sc->sc_pc, sc->sc_tag,
		    CMD_DATA_TIM(chp->ch_channel, drive), tim);
	}
	if (idedma_ctl != 0) {
		/* Add software bits in status register */
		bus_space_write_1(sc->sc_dma_iot, cp->dma_iohs[IDEDMA_CTL], 0,
		    idedma_ctl);
	}
}

static void
cmd646_9_irqack(struct ata_channel *chp)
{
	u_int32_t priirq, secirq;
	struct pciide_softc *sc = CHAN_TO_PCIIDE(chp);

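	/*
	 * Writing back the value just read presumably clears the latched
	 * interrupt bit for this channel; these are the same bits that
	 * cmd_pci_intr() tests.
	 */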
	if (chp->ch_channel == 0) {
		priirq = pciide_pci_read(sc->sc_pc, sc->sc_tag, CMD_CONF);
		pciide_pci_write(sc->sc_pc, sc->sc_tag, CMD_CONF, priirq);
	} else {
		secirq = pciide_pci_read(sc->sc_pc, sc->sc_tag, CMD_ARTTIM23);
		pciide_pci_write(sc->sc_pc, sc->sc_tag, CMD_ARTTIM23, secirq);
	}
	pciide_irqack(chp);
}

static void
cmd680_chip_map(struct pciide_softc *sc, const struct pci_attach_args *pa)
{
	int channel;

	if (pciide_chipen(sc, pa) == 0)
		return;

	aprint_verbose_dev(sc->sc_wdcdev.sc_atac.atac_dev,
	    "bus-master DMA support present");
	pciide_mapreg_dma(sc, pa);
	aprint_verbose("\n");
	sc->sc_wdcdev.sc_atac.atac_cap = ATAC_CAP_DATA16 | ATAC_CAP_DATA32;
	if (sc->sc_dma_ok) {
		sc->sc_wdcdev.sc_atac.atac_cap |= ATAC_CAP_DMA;
		sc->sc_wdcdev.sc_atac.atac_cap |= ATAC_CAP_UDMA;
		sc->sc_wdcdev.sc_atac.atac_udma_cap = 6;
		sc->sc_wdcdev.irqack = pciide_irqack;
	}

	sc->sc_wdcdev.sc_atac.atac_channels = sc->wdc_chanarray;
	sc->sc_wdcdev.sc_atac.atac_nchannels = PCIIDE_NUM_CHANNELS;
	sc->sc_wdcdev.sc_atac.atac_pio_cap = 4;
	sc->sc_wdcdev.sc_atac.atac_dma_cap = 2;
	sc->sc_wdcdev.sc_atac.atac_set_modes = cmd680_setup_channel;
	sc->sc_wdcdev.wdc_maxdrives = 2;

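	/*
	 * Magic initialization: clear the per-channel mode registers
	 * (0x80/0x84, rewritten later by cmd680_setup_channel()) and set
	 * bit 0 of the register at 0x8a (presumably clock configuration),
	 * which cmd680_setup_channel() also toggles when probing for
	 * UDMA6 support.
	 */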
	pciide_pci_write(sc->sc_pc, sc->sc_tag, 0x80, 0x00);
	pciide_pci_write(sc->sc_pc, sc->sc_tag, 0x84, 0x00);
	pciide_pci_write(sc->sc_pc, sc->sc_tag, 0x8a,
	    pciide_pci_read(sc->sc_pc, sc->sc_tag, 0x8a) | 0x01);

	wdc_allocate_regs(&sc->sc_wdcdev);

	for (channel = 0; channel < sc->sc_wdcdev.sc_atac.atac_nchannels;
	     channel++)
		cmd680_channel_map(pa, sc, channel);
}

static void
cmd680_channel_map(const struct pci_attach_args *pa, struct pciide_softc *sc,
    int channel)
{
	struct pciide_channel *cp = &sc->pciide_channels[channel];
	int interface, i, reg;
	static const u_int8_t init_val[] =
	    {             0x8a, 0x32, 0x8a, 0x32, 0x8a, 0x32,
	      0x92, 0x43, 0x92, 0x43, 0x09, 0x40, 0x09, 0x40 };

	if (PCI_SUBCLASS(pa->pa_class) != PCI_SUBCLASS_MASS_STORAGE_IDE) {
		interface = PCIIDE_INTERFACE_SETTABLE(0) |
		    PCIIDE_INTERFACE_SETTABLE(1);
		interface |= PCIIDE_INTERFACE_PCI(0) |
		    PCIIDE_INTERFACE_PCI(1);
	} else {
		interface = PCI_INTERFACE(pa->pa_class);
	}

	sc->wdc_chanarray[channel] = &cp->ata_channel;
	cp->name = PCIIDE_CHANNEL_NAME(channel);
	cp->ata_channel.ch_channel = channel;
	cp->ata_channel.ch_atac = &sc->sc_wdcdev.sc_atac;

	/* XXX */
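	/*
	 * Presumably the per-channel timing registers: load conservative
	 * defaults into the block starting at 0xa2.  cmd680_setup_channel()
	 * later rewrites the PIO (0xa4), MWDMA (0xa8) and UDMA (0xac)
	 * entries for each drive.
	 */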
	reg = 0xa2 + channel * 16;
	for (i = 0; i < sizeof(init_val); i++)
		pciide_pci_write(sc->sc_pc, sc->sc_tag, reg + i, init_val[i]);

	aprint_normal_dev(sc->sc_wdcdev.sc_atac.atac_dev,
	    "%s channel %s to %s mode\n", cp->name,
	    (interface & PCIIDE_INTERFACE_SETTABLE(channel)) ?
	    "configured" : "wired",
	    (interface & PCIIDE_INTERFACE_PCI(channel)) ?
	    "native-PCI" : "compatibility");

	pciide_mapchan(pa, cp, interface, pciide_pci_intr);
}

static void
cmd680_setup_channel(struct ata_channel *chp)
{
	struct ata_drive_datas *drvp;
	u_int8_t mode, off, scsc;
	u_int16_t val;
	u_int32_t idedma_ctl;
	int drive, s;
	struct pciide_channel *cp = CHAN_TO_PCHAN(chp);
	struct pciide_softc *sc = CHAN_TO_PCIIDE(chp);
	pci_chipset_tag_t pc = sc->sc_pc;
	pcitag_t pa = sc->sc_tag;
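	/*
	 * Raw timing values indexed by mode.  udma2_tbl is used when the
	 * faster base clock is active (bits 0x30 of register 0x8a),
	 * udma_tbl otherwise; dma_tbl and pio_tbl hold the 16-bit MWDMA
	 * and PIO timings written below.
	 */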
	static const u_int8_t udma2_tbl[] =
	    { 0x0f, 0x0b, 0x07, 0x06, 0x03, 0x02, 0x01 };
	static const u_int8_t udma_tbl[] =
	    { 0x0c, 0x07, 0x05, 0x04, 0x02, 0x01, 0x00 };
	static const u_int16_t dma_tbl[] =
	    { 0x2208, 0x10c2, 0x10c1 };
	static const u_int16_t pio_tbl[] =
	    { 0x328a, 0x2283, 0x1104, 0x10c3, 0x10c1 };

	idedma_ctl = 0;
	pciide_channel_dma_setup(cp);
	mode = pciide_pci_read(pc, pa, 0x80 + chp->ch_channel * 4);

	for (drive = 0; drive < 2; drive++) {
		drvp = &chp->ch_drive[drive];
		/* If no drive, skip */
		if (drvp->drive_type == ATA_DRIVET_NONE)
			continue;
		mode &= ~(0x03 << (drive * 4));
		if (drvp->drive_flags & ATA_DRIVE_UDMA) {
			s = splbio();
			drvp->drive_flags &= ~ATA_DRIVE_DMA;
			splx(s);
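			/*
			 * Bit 0 of the per-channel register at 0xa0
			 * presumably reports an 80-conductor cable;
			 * without it, cap at UDMA2 (Ultra/33).
			 */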
			off = 0xa0 + chp->ch_channel * 16;
			if (drvp->UDMA_mode > 2 &&
			    (pciide_pci_read(pc, pa, off) & 0x01) == 0)
				drvp->UDMA_mode = 2;
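			/*
			 * UDMA6 requires the faster base clock (bits 0x30
			 * of register 0x8a).  Try to enable it; if the
			 * clock does not switch, fall back to UDMA5.
			 */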
			scsc = pciide_pci_read(pc, pa, 0x8a);
			if (drvp->UDMA_mode == 6 && (scsc & 0x30) == 0) {
				pciide_pci_write(pc, pa, 0x8a, scsc | 0x01);
				scsc = pciide_pci_read(pc, pa, 0x8a);
				if ((scsc & 0x30) == 0)
					drvp->UDMA_mode = 5;
			}
			mode |= 0x03 << (drive * 4);
			off = 0xac + chp->ch_channel * 16 + drive * 2;
			val = pciide_pci_read(pc, pa, off) & ~0x3f;
			if (scsc & 0x30)
				val |= udma2_tbl[drvp->UDMA_mode];
			else
				val |= udma_tbl[drvp->UDMA_mode];
			pciide_pci_write(pc, pa, off, val);
			idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
		} else if (drvp->drive_flags & ATA_DRIVE_DMA) {
			mode |= 0x02 << (drive * 4);
			off = 0xa8 + chp->ch_channel * 16 + drive * 2;
			val = dma_tbl[drvp->DMA_mode];
			pciide_pci_write(pc, pa, off, val & 0xff);
			pciide_pci_write(pc, pa, off+1, val >> 8);
			idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
		} else {
			mode |= 0x01 << (drive * 4);
			off = 0xa4 + chp->ch_channel * 16 + drive * 2;
			val = pio_tbl[drvp->PIO_mode];
			pciide_pci_write(pc, pa, off, val & 0xff);
			pciide_pci_write(pc, pa, off+1, val >> 8);
		}
	}

	pciide_pci_write(pc, pa, 0x80 + chp->ch_channel * 4, mode);
	if (idedma_ctl != 0) {
		/* Add software bits in status register */
		bus_space_write_1(sc->sc_dma_iot, cp->dma_iohs[IDEDMA_CTL], 0,
		    idedma_ctl);
	}
}
643