xref: /netbsd-src/sys/dev/pci/pciide.c (revision 3cec974c61d7fac0a37c0377723a33214a458c8b)
1 /*	$NetBSD: pciide.c,v 1.107 2001/02/18 18:07:53 bouyer Exp $	*/
2 
3 
4 /*
5  * Copyright (c) 1999 Manuel Bouyer.
6  *
7  * Redistribution and use in source and binary forms, with or without
8  * modification, are permitted provided that the following conditions
9  * are met:
10  * 1. Redistributions of source code must retain the above copyright
11  *    notice, this list of conditions and the following disclaimer.
12  * 2. Redistributions in binary form must reproduce the above copyright
13  *    notice, this list of conditions and the following disclaimer in the
14  *    documentation and/or other materials provided with the distribution.
15  * 3. All advertising materials mentioning features or use of this software
16  *    must display the following acknowledgement:
17  *	This product includes software developed by the University of
18  *	California, Berkeley and its contributors.
19  * 4. Neither the name of the University nor the names of its contributors
20  *    may be used to endorse or promote products derived from this software
21  *    without specific prior written permission.
22  *
23  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
24  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
25  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
26  * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
27  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
28  * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
29  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
30  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
31  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
32  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
33  *
34  */
35 
36 
37 /*
38  * Copyright (c) 1996, 1998 Christopher G. Demetriou.  All rights reserved.
39  *
40  * Redistribution and use in source and binary forms, with or without
41  * modification, are permitted provided that the following conditions
42  * are met:
43  * 1. Redistributions of source code must retain the above copyright
44  *    notice, this list of conditions and the following disclaimer.
45  * 2. Redistributions in binary form must reproduce the above copyright
46  *    notice, this list of conditions and the following disclaimer in the
47  *    documentation and/or other materials provided with the distribution.
48  * 3. All advertising materials mentioning features or use of this software
49  *    must display the following acknowledgement:
50  *      This product includes software developed by Christopher G. Demetriou
51  *	for the NetBSD Project.
52  * 4. The name of the author may not be used to endorse or promote products
53  *    derived from this software without specific prior written permission
54  *
55  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
56  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
57  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
58  * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
59  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
60  * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
61  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
62  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
63  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
64  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
65  */
66 
67 /*
68  * PCI IDE controller driver.
69  *
70  * Author: Christopher G. Demetriou, March 2, 1998 (derived from NetBSD
71  * sys/dev/pci/ppb.c, revision 1.16).
72  *
73  * See "PCI IDE Controller Specification, Revision 1.0 3/4/94" and
74  * "Programming Interface for Bus Master IDE Controller, Revision 1.0
75  * 5/16/94" from the PCI SIG.
76  *
77  */
78 
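/*
 * Note: the block below unconditionally defines WDCDEBUG, so the
 * WDCDEBUG_PRINT() calls in this file are always compiled in; the
 * wdcdebug_pciide_mask global selects which DEBUG_* classes actually
 * print.
 */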
79 #ifndef WDCDEBUG
80 #define WDCDEBUG
81 #endif
82 
83 #define DEBUG_DMA   0x01
84 #define DEBUG_XFERS  0x02
85 #define DEBUG_FUNCS  0x08
86 #define DEBUG_PROBE  0x10
87 #ifdef WDCDEBUG
88 int wdcdebug_pciide_mask = 0;
89 #define WDCDEBUG_PRINT(args, level) \
90 	if (wdcdebug_pciide_mask & (level)) printf args
91 #else
92 #define WDCDEBUG_PRINT(args, level)
93 #endif
94 #include <sys/param.h>
95 #include <sys/systm.h>
96 #include <sys/device.h>
97 #include <sys/malloc.h>
98 
99 #include <uvm/uvm_extern.h>
100 
101 #include <machine/endian.h>
102 
103 #include <dev/pci/pcireg.h>
104 #include <dev/pci/pcivar.h>
105 #include <dev/pci/pcidevs.h>
106 #include <dev/pci/pciidereg.h>
107 #include <dev/pci/pciidevar.h>
108 #include <dev/pci/pciide_piix_reg.h>
109 #include <dev/pci/pciide_amd_reg.h>
110 #include <dev/pci/pciide_apollo_reg.h>
111 #include <dev/pci/pciide_cmd_reg.h>
112 #include <dev/pci/pciide_cy693_reg.h>
113 #include <dev/pci/pciide_sis_reg.h>
114 #include <dev/pci/pciide_acer_reg.h>
115 #include <dev/pci/pciide_pdc202xx_reg.h>
116 #include <dev/pci/pciide_opti_reg.h>
117 #include <dev/pci/pciide_hpt_reg.h>
118 #include <dev/pci/cy82c693var.h>
119 
120 #include "opt_pciide.h"
121 
122 /* inlines for reading/writing 8-bit PCI registers */
123 static __inline u_int8_t pciide_pci_read __P((pci_chipset_tag_t, pcitag_t,
124 					      int));
125 static __inline void pciide_pci_write __P((pci_chipset_tag_t, pcitag_t,
126 					   int, u_int8_t));
127 
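/*
 * PCI configuration space is only accessible one 32-bit word at a time
 * through pci_conf_read()/pci_conf_write(), so these helpers emulate
 * 8-bit accesses: round the register offset down to a word boundary,
 * then shift and mask to isolate (or replace) the wanted byte lane.
 * For example (hypothetical register offset), pciide_pci_write(pc, tag,
 * 0x41, 0x80) reads the word at 0x40, replaces bits 15:8 and writes the
 * word back.
 */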
128 static __inline u_int8_t
129 pciide_pci_read(pc, pa, reg)
130 	pci_chipset_tag_t pc;
131 	pcitag_t pa;
132 	int reg;
133 {
134 
135 	return (pci_conf_read(pc, pa, (reg & ~0x03)) >>
136 	    ((reg & 0x03) * 8) & 0xff);
137 }
138 
139 static __inline void
140 pciide_pci_write(pc, pa, reg, val)
141 	pci_chipset_tag_t pc;
142 	pcitag_t pa;
143 	int reg;
144 	u_int8_t val;
145 {
146 	pcireg_t pcival;
147 
148 	pcival = pci_conf_read(pc, pa, (reg & ~0x03));
149 	pcival &= ~(0xff << ((reg & 0x03) * 8));
150 	pcival |= (val << ((reg & 0x03) * 8));
151 	pci_conf_write(pc, pa, (reg & ~0x03), pcival);
152 }
153 
154 void default_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
155 
156 void piix_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
157 void piix_setup_channel __P((struct channel_softc*));
158 void piix3_4_setup_channel __P((struct channel_softc*));
159 static u_int32_t piix_setup_idetim_timings __P((u_int8_t, u_int8_t, u_int8_t));
160 static u_int32_t piix_setup_idetim_drvs __P((struct ata_drive_datas*));
161 static u_int32_t piix_setup_sidetim_timings __P((u_int8_t, u_int8_t, u_int8_t));
162 
163 void amd756_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
164 void amd756_setup_channel __P((struct channel_softc*));
165 
166 void apollo_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
167 void apollo_setup_channel __P((struct channel_softc*));
168 
169 void cmd_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
170 void cmd0643_9_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
171 void cmd0643_9_setup_channel __P((struct channel_softc*));
172 void cmd_channel_map __P((struct pci_attach_args *,
173 			struct pciide_softc *, int));
174 int  cmd_pci_intr __P((void *));
175 void cmd646_9_irqack __P((struct channel_softc *));
176 
177 void cy693_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
178 void cy693_setup_channel __P((struct channel_softc*));
179 
180 void sis_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
181 void sis_setup_channel __P((struct channel_softc*));
182 
183 void acer_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
184 void acer_setup_channel __P((struct channel_softc*));
185 int  acer_pci_intr __P((void *));
186 
187 void pdc202xx_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
188 void pdc202xx_setup_channel __P((struct channel_softc*));
189 int  pdc202xx_pci_intr __P((void *));
190 
191 void opti_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
192 void opti_setup_channel __P((struct channel_softc*));
193 
194 void hpt_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
195 void hpt_setup_channel __P((struct channel_softc*));
196 int  hpt_pci_intr __P((void *));
197 
198 void pciide_channel_dma_setup __P((struct pciide_channel *));
199 int  pciide_dma_table_setup __P((struct pciide_softc*, int, int));
200 int  pciide_dma_init __P((void*, int, int, void *, size_t, int));
201 void pciide_dma_start __P((void*, int, int));
202 int  pciide_dma_finish __P((void*, int, int, int));
203 void pciide_irqack __P((struct channel_softc *));
204 void pciide_print_modes __P((struct pciide_channel *));
205 
206 struct pciide_product_desc {
207 	u_int32_t ide_product;
208 	int ide_flags;
209 	const char *ide_name;
210 	/* map and setup chip, probe drives */
211 	void (*chip_map) __P((struct pciide_softc*, struct pci_attach_args*));
212 };
213 
214 /* Flags for ide_flags */
215 #define IDE_PCI_CLASS_OVERRIDE	0x0001 /* accept even if class != pciide */
216 #define	IDE_16BIT_IOSPACE	0x0002 /* I/O space BARs ignore the upper word */
217 
218 /* Default product description for devices not specifically known to this driver */
219 const struct pciide_product_desc default_product_desc = {
220 	0,
221 	0,
222 	"Generic PCI IDE controller",
223 	default_chip_map,
224 };
225 
226 const struct pciide_product_desc pciide_intel_products[] =  {
227 	{ PCI_PRODUCT_INTEL_82092AA,
228 	  0,
229 	  "Intel 82092AA IDE controller",
230 	  default_chip_map,
231 	},
232 	{ PCI_PRODUCT_INTEL_82371FB_IDE,
233 	  0,
234 	  "Intel 82371FB IDE controller (PIIX)",
235 	  piix_chip_map,
236 	},
237 	{ PCI_PRODUCT_INTEL_82371SB_IDE,
238 	  0,
239 	  "Intel 82371SB IDE Interface (PIIX3)",
240 	  piix_chip_map,
241 	},
242 	{ PCI_PRODUCT_INTEL_82371AB_IDE,
243 	  0,
244 	  "Intel 82371AB IDE controller (PIIX4)",
245 	  piix_chip_map,
246 	},
247 	{ PCI_PRODUCT_INTEL_82440MX_IDE,
248 	  0,
249 	  "Intel 82440MX IDE controller",
250 	  piix_chip_map
251 	},
252 	{ PCI_PRODUCT_INTEL_82801AA_IDE,
253 	  0,
254 	  "Intel 82801AA IDE Controller (ICH)",
255 	  piix_chip_map,
256 	},
257 	{ PCI_PRODUCT_INTEL_82801AB_IDE,
258 	  0,
259 	  "Intel 82801AB IDE Controller (ICH0)",
260 	  piix_chip_map,
261 	},
262 	{ PCI_PRODUCT_INTEL_82801BA_IDE,
263 	  0,
264 	  "Intel 82801BA IDE Controller (ICH2)",
265 	  piix_chip_map,
266 	},
267 	{ PCI_PRODUCT_INTEL_82801BAM_IDE,
268 	  0,
269 	  "Intel 82801BAM IDE Controller (ICH2)",
270 	  piix_chip_map,
271 	},
272 	{ 0,
273 	  0,
274 	  NULL,
275 	}
276 };
277 
278 const struct pciide_product_desc pciide_amd_products[] =  {
279 	{ PCI_PRODUCT_AMD_PBC756_IDE,
280 	  0,
281 	  "Advanced Micro Devices AMD756 IDE Controller",
282 	  amd756_chip_map
283 	},
284 	{ 0,
285 	  0,
286 	  NULL,
287 	}
288 };
289 
290 const struct pciide_product_desc pciide_cmd_products[] =  {
291 	{ PCI_PRODUCT_CMDTECH_640,
292 	  0,
293 	  "CMD Technology PCI0640",
294 	  cmd_chip_map
295 	},
296 	{ PCI_PRODUCT_CMDTECH_643,
297 	  0,
298 	  "CMD Technology PCI0643",
299 	  cmd0643_9_chip_map,
300 	},
301 	{ PCI_PRODUCT_CMDTECH_646,
302 	  0,
303 	  "CMD Technology PCI0646",
304 	  cmd0643_9_chip_map,
305 	},
306 	{ PCI_PRODUCT_CMDTECH_648,
307 	  IDE_PCI_CLASS_OVERRIDE,
308 	  "CMD Technology PCI0648",
309 	  cmd0643_9_chip_map,
310 	},
311 	{ PCI_PRODUCT_CMDTECH_649,
312 	  IDE_PCI_CLASS_OVERRIDE,
313 	  "CMD Technology PCI0649",
314 	  cmd0643_9_chip_map,
315 	},
316 	{ 0,
317 	  0,
318 	  NULL,
319 	}
320 };
321 
322 const struct pciide_product_desc pciide_via_products[] =  {
323 	{ PCI_PRODUCT_VIATECH_VT82C586_IDE,
324 	  0,
325 	  "VIA Tech VT82C586 IDE Controller",
326 	  apollo_chip_map,
327 	 },
328 	{ PCI_PRODUCT_VIATECH_VT82C586A_IDE,
329 	  0,
330 	  "VIA Tech VT82C586A IDE Controller",
331 	  apollo_chip_map,
332 	},
333 	{ 0,
334 	  0,
335 	  NULL,
336 	}
337 };
338 
339 const struct pciide_product_desc pciide_cypress_products[] =  {
340 	{ PCI_PRODUCT_CONTAQ_82C693,
341 	  IDE_16BIT_IOSPACE,
342 	  "Cypress 82C693 IDE Controller",
343 	  cy693_chip_map,
344 	},
345 	{ 0,
346 	  0,
347 	  NULL,
348 	}
349 };
350 
351 const struct pciide_product_desc pciide_sis_products[] =  {
352 	{ PCI_PRODUCT_SIS_5597_IDE,
353 	  0,
354 	  "Silicon Integrated System 5597/5598 IDE controller",
355 	  sis_chip_map,
356 	},
357 	{ 0,
358 	  0,
359 	  NULL,
360 	}
361 };
362 
363 const struct pciide_product_desc pciide_acer_products[] =  {
364 	{ PCI_PRODUCT_ALI_M5229,
365 	  0,
366 	  "Acer Labs M5229 UDMA IDE Controller",
367 	  acer_chip_map,
368 	},
369 	{ 0,
370 	  0,
371 	  NULL,
372 	}
373 };
374 
375 const struct pciide_product_desc pciide_promise_products[] =  {
376 	{ PCI_PRODUCT_PROMISE_ULTRA33,
377 	  IDE_PCI_CLASS_OVERRIDE,
378 	  "Promise Ultra33/ATA Bus Master IDE Accelerator",
379 	  pdc202xx_chip_map,
380 	},
381 	{ PCI_PRODUCT_PROMISE_ULTRA66,
382 	  IDE_PCI_CLASS_OVERRIDE,
383 	  "Promise Ultra66/ATA Bus Master IDE Accelerator",
384 	  pdc202xx_chip_map,
385 	},
386 	{ PCI_PRODUCT_PROMISE_ULTRA100,
387 	  IDE_PCI_CLASS_OVERRIDE,
388 	  "Promise Ultra100/ATA Bus Master IDE Accelerator",
389 	  pdc202xx_chip_map,
390 	},
391 	{ PCI_PRODUCT_PROMISE_ULTRA100X,
392 	  IDE_PCI_CLASS_OVERRIDE,
393 	  "Promise Ultra100/ATA Bus Master IDE Accelerator",
394 	  pdc202xx_chip_map,
395 	},
396 	{ 0,
397 	  0,
398 	  NULL,
399 	}
400 };
401 
402 const struct pciide_product_desc pciide_opti_products[] =  {
403 	{ PCI_PRODUCT_OPTI_82C621,
404 	  0,
405 	  "OPTi 82c621 PCI IDE controller",
406 	  opti_chip_map,
407 	},
408 	{ PCI_PRODUCT_OPTI_82C568,
409 	  0,
410 	  "OPTi 82c568 (82c621 compatible) PCI IDE controller",
411 	  opti_chip_map,
412 	},
413 	{ PCI_PRODUCT_OPTI_82D568,
414 	  0,
415 	  "OPTi 82d568 (82c621 compatible) PCI IDE controller",
416 	  opti_chip_map,
417 	},
418 	{ 0,
419 	  0,
420 	  NULL,
421 	}
422 };
423 
424 const struct pciide_product_desc pciide_triones_products[] =  {
425 	{ PCI_PRODUCT_TRIONES_HPT366,
426 	  IDE_PCI_CLASS_OVERRIDE,
427 	  "Triones/Highpoint HPT366/370 IDE Controller",
428 	  hpt_chip_map,
429 	},
430 	{ 0,
431 	  0,
432 	  NULL,
433 	}
434 };
435 
436 struct pciide_vendor_desc {
437 	u_int32_t ide_vendor;
438 	const struct pciide_product_desc *ide_products;
439 };
440 
441 const struct pciide_vendor_desc pciide_vendors[] = {
442 	{ PCI_VENDOR_INTEL, pciide_intel_products },
443 	{ PCI_VENDOR_CMDTECH, pciide_cmd_products },
444 	{ PCI_VENDOR_VIATECH, pciide_via_products },
445 	{ PCI_VENDOR_CONTAQ, pciide_cypress_products },
446 	{ PCI_VENDOR_SIS, pciide_sis_products },
447 	{ PCI_VENDOR_ALI, pciide_acer_products },
448 	{ PCI_VENDOR_PROMISE, pciide_promise_products },
449 	{ PCI_VENDOR_AMD, pciide_amd_products },
450 	{ PCI_VENDOR_OPTI, pciide_opti_products },
451 	{ PCI_VENDOR_TRIONES, pciide_triones_products },
452 	{ 0, NULL }
453 };
454 
455 /* options passed via the 'flags' config keyword */
456 #define PCIIDE_OPTIONS_DMA	0x01
457 
458 int	pciide_match __P((struct device *, struct cfdata *, void *));
459 void	pciide_attach __P((struct device *, struct device *, void *));
460 
461 struct cfattach pciide_ca = {
462 	sizeof(struct pciide_softc), pciide_match, pciide_attach
463 };
464 int	pciide_chipen __P((struct pciide_softc *, struct pci_attach_args *));
465 int	pciide_mapregs_compat __P(( struct pci_attach_args *,
466 	    struct pciide_channel *, int, bus_size_t *, bus_size_t*));
467 int	pciide_mapregs_native __P((struct pci_attach_args *,
468 	    struct pciide_channel *, bus_size_t *, bus_size_t *,
469 	    int (*pci_intr) __P((void *))));
470 void	pciide_mapreg_dma __P((struct pciide_softc *,
471 	    struct pci_attach_args *));
472 int	pciide_chansetup __P((struct pciide_softc *, int, pcireg_t));
473 void	pciide_mapchan __P((struct pci_attach_args *,
474 	    struct pciide_channel *, pcireg_t, bus_size_t *, bus_size_t *,
475 	    int (*pci_intr) __P((void *))));
476 int	pciide_chan_candisable __P((struct pciide_channel *));
477 void	pciide_map_compat_intr __P(( struct pci_attach_args *,
478 	    struct pciide_channel *, int, int));
479 int	pciide_print __P((void *, const char *pnp));
480 int	pciide_compat_intr __P((void *));
481 int	pciide_pci_intr __P((void *));
482 const struct pciide_product_desc* pciide_lookup_product __P((u_int32_t));
483 
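/*
 * Look up the product descriptor for a PCI ID: find the vendor's
 * product table first, then scan it for a matching product.
 * Returns NULL if either the vendor or the product is unknown.
 */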
484 const struct pciide_product_desc *
485 pciide_lookup_product(id)
486 	u_int32_t id;
487 {
488 	const struct pciide_product_desc *pp;
489 	const struct pciide_vendor_desc *vp;
490 
491 	for (vp = pciide_vendors; vp->ide_products != NULL; vp++)
492 		if (PCI_VENDOR(id) == vp->ide_vendor)
493 			break;
494 
495 	if ((pp = vp->ide_products) == NULL)
496 		return NULL;
497 
498 	for (; pp->ide_name != NULL; pp++)
499 		if (PCI_PRODUCT(id) == pp->ide_product)
500 			break;
501 
502 	if (pp->ide_name == NULL)
503 		return NULL;
504 	return pp;
505 }
506 
507 int
508 pciide_match(parent, match, aux)
509 	struct device *parent;
510 	struct cfdata *match;
511 	void *aux;
512 {
513 	struct pci_attach_args *pa = aux;
514 	const struct pciide_product_desc *pp;
515 
516 	/*
517 	 * Check the ID register to see that it's a PCI IDE controller.
518 	 * If it is, we assume that we can deal with it; it _should_
519 	 * work in a standardized way...
520 	 */
521 	if (PCI_CLASS(pa->pa_class) == PCI_CLASS_MASS_STORAGE &&
522 	    PCI_SUBCLASS(pa->pa_class) == PCI_SUBCLASS_MASS_STORAGE_IDE) {
523 		return (1);
524 	}
525 
526 	/*
527 	 * Some controllers (e.g. the Promise Ultra-33) don't claim to be PCI IDE
528 	 * controllers. Let's see if we can deal with them anyway.
529 	 */
530 	pp = pciide_lookup_product(pa->pa_id);
531 	if (pp && (pp->ide_flags & IDE_PCI_CLASS_OVERRIDE)) {
532 		return (1);
533 	}
534 
535 	return (0);
536 }
537 
538 void
539 pciide_attach(parent, self, aux)
540 	struct device *parent, *self;
541 	void *aux;
542 {
543 	struct pci_attach_args *pa = aux;
544 	pci_chipset_tag_t pc = pa->pa_pc;
545 	pcitag_t tag = pa->pa_tag;
546 	struct pciide_softc *sc = (struct pciide_softc *)self;
547 	pcireg_t csr;
548 	char devinfo[256];
549 	const char *displaydev;
550 
551 	sc->sc_pp = pciide_lookup_product(pa->pa_id);
552 	if (sc->sc_pp == NULL) {
553 		sc->sc_pp = &default_product_desc;
554 		pci_devinfo(pa->pa_id, pa->pa_class, 0, devinfo);
555 		displaydev = devinfo;
556 	} else
557 		displaydev = sc->sc_pp->ide_name;
558 
559 	printf(": %s (rev. 0x%02x)\n", displaydev, PCI_REVISION(pa->pa_class));
560 
561 	sc->sc_pc = pa->pa_pc;
562 	sc->sc_tag = pa->pa_tag;
563 #ifdef WDCDEBUG
564 	if (wdcdebug_pciide_mask & DEBUG_PROBE)
565 		pci_conf_print(sc->sc_pc, sc->sc_tag, NULL);
566 #endif
567 	sc->sc_pp->chip_map(sc, pa);
568 
569 	if (sc->sc_dma_ok) {
570 		csr = pci_conf_read(pc, tag, PCI_COMMAND_STATUS_REG);
571 		csr |= PCI_COMMAND_MASTER_ENABLE;
572 		pci_conf_write(pc, tag, PCI_COMMAND_STATUS_REG, csr);
573 	}
574 	WDCDEBUG_PRINT(("pciide: command/status register=%x\n",
575 	    pci_conf_read(pc, tag, PCI_COMMAND_STATUS_REG)), DEBUG_PROBE);
576 }
577 
578 /* tell whether the chip is enabled or not */
579 int
580 pciide_chipen(sc, pa)
581 	struct pciide_softc *sc;
582 	struct pci_attach_args *pa;
583 {
584 	pcireg_t csr;
585 	if ((pa->pa_flags & PCI_FLAGS_IO_ENABLED) == 0) {
586 		csr = pci_conf_read(sc->sc_pc, sc->sc_tag,
587 		    PCI_COMMAND_STATUS_REG);
588 		printf("%s: device disabled (at %s)\n",
589 		    sc->sc_wdcdev.sc_dev.dv_xname,
590 		    (csr & PCI_COMMAND_IO_ENABLE) == 0 ?
591 		    "device" : "bridge");
592 		return 0;
593 	}
594 	return 1;
595 }
596 
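/*
 * Map a channel in compatibility mode: the command and control register
 * blocks live at the fixed legacy addresses encoded by
 * PCIIDE_COMPAT_CMD_BASE()/PCIIDE_COMPAT_CTL_BASE() (normally the
 * ISA-legacy 0x1f0/0x3f6 and 0x170/0x376 ranges).
 */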
597 int
598 pciide_mapregs_compat(pa, cp, compatchan, cmdsizep, ctlsizep)
599 	struct pci_attach_args *pa;
600 	struct pciide_channel *cp;
601 	int compatchan;
602 	bus_size_t *cmdsizep, *ctlsizep;
603 {
604 	struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
605 	struct channel_softc *wdc_cp = &cp->wdc_channel;
606 
607 	cp->compat = 1;
608 	*cmdsizep = PCIIDE_COMPAT_CMD_SIZE;
609 	*ctlsizep = PCIIDE_COMPAT_CTL_SIZE;
610 
611 	wdc_cp->cmd_iot = pa->pa_iot;
612 	if (bus_space_map(wdc_cp->cmd_iot, PCIIDE_COMPAT_CMD_BASE(compatchan),
613 	    PCIIDE_COMPAT_CMD_SIZE, 0, &wdc_cp->cmd_ioh) != 0) {
614 		printf("%s: couldn't map %s channel cmd regs\n",
615 		    sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
616 		return (0);
617 	}
618 
619 	wdc_cp->ctl_iot = pa->pa_iot;
620 	if (bus_space_map(wdc_cp->ctl_iot, PCIIDE_COMPAT_CTL_BASE(compatchan),
621 	    PCIIDE_COMPAT_CTL_SIZE, 0, &wdc_cp->ctl_ioh) != 0) {
622 		printf("%s: couldn't map %s channel ctl regs\n",
623 		    sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
624 		bus_space_unmap(wdc_cp->cmd_iot, wdc_cp->cmd_ioh,
625 		    PCIIDE_COMPAT_CMD_SIZE);
626 		return (0);
627 	}
628 
629 	return (1);
630 }
631 
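/*
 * Map a channel in native-PCI mode: the register blocks come from the
 * BARs selected by PCIIDE_REG_CMD_BASE()/PCIIDE_REG_CTL_BASE(), and a
 * single PCI interrupt (sc_pci_ih, established on the first call) is
 * shared by both channels of the controller.
 */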
632 int
633 pciide_mapregs_native(pa, cp, cmdsizep, ctlsizep, pci_intr)
634 	struct pci_attach_args * pa;
635 	struct pciide_channel *cp;
636 	bus_size_t *cmdsizep, *ctlsizep;
637 	int (*pci_intr) __P((void *));
638 {
639 	struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
640 	struct channel_softc *wdc_cp = &cp->wdc_channel;
641 	const char *intrstr;
642 	pci_intr_handle_t intrhandle;
643 
644 	cp->compat = 0;
645 
646 	if (sc->sc_pci_ih == NULL) {
647 		if (pci_intr_map(pa, &intrhandle) != 0) {
648 			printf("%s: couldn't map native-PCI interrupt\n",
649 			    sc->sc_wdcdev.sc_dev.dv_xname);
650 			return 0;
651 		}
652 		intrstr = pci_intr_string(pa->pa_pc, intrhandle);
653 		sc->sc_pci_ih = pci_intr_establish(pa->pa_pc,
654 		    intrhandle, IPL_BIO, pci_intr, sc);
655 		if (sc->sc_pci_ih != NULL) {
656 			printf("%s: using %s for native-PCI interrupt\n",
657 			    sc->sc_wdcdev.sc_dev.dv_xname,
658 			    intrstr ? intrstr : "unknown interrupt");
659 		} else {
660 			printf("%s: couldn't establish native-PCI interrupt",
661 			    sc->sc_wdcdev.sc_dev.dv_xname);
662 			if (intrstr != NULL)
663 				printf(" at %s", intrstr);
664 			printf("\n");
665 			return 0;
666 		}
667 	}
668 	cp->ih = sc->sc_pci_ih;
669 	if (pci_mapreg_map(pa, PCIIDE_REG_CMD_BASE(wdc_cp->channel),
670 	    PCI_MAPREG_TYPE_IO, 0,
671 	    &wdc_cp->cmd_iot, &wdc_cp->cmd_ioh, NULL, cmdsizep) != 0) {
672 		printf("%s: couldn't map %s channel cmd regs\n",
673 		    sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
674 		return 0;
675 	}
676 
677 	if (pci_mapreg_map(pa, PCIIDE_REG_CTL_BASE(wdc_cp->channel),
678 	    PCI_MAPREG_TYPE_IO, 0,
679 	    &wdc_cp->ctl_iot, &cp->ctl_baseioh, NULL, ctlsizep) != 0) {
680 		printf("%s: couldn't map %s channel ctl regs\n",
681 		    sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
682 		bus_space_unmap(wdc_cp->cmd_iot, wdc_cp->cmd_ioh, *cmdsizep);
683 		return 0;
684 	}
685 	/*
686 	 * In native mode, 4 bytes of I/O space are mapped for the control
687 	 * register block; the control register itself is at offset 2. Pass
688 	 * the generic code a handle for only one byte at the right offset.
689 	 */
690 	if (bus_space_subregion(wdc_cp->ctl_iot, cp->ctl_baseioh, 2, 1,
691 	    &wdc_cp->ctl_ioh) != 0) {
692 		printf("%s: unable to subregion %s channel ctl regs\n",
693 		    sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
694 		bus_space_unmap(wdc_cp->cmd_iot, wdc_cp->cmd_ioh, *cmdsizep);
695 		bus_space_unmap(wdc_cp->ctl_iot, cp->ctl_baseioh, *ctlsizep);
696 		return 0;
697 	}
698 	return (1);
699 }
700 
701 void
702 pciide_mapreg_dma(sc, pa)
703 	struct pciide_softc *sc;
704 	struct pci_attach_args *pa;
705 {
706 	pcireg_t maptype;
707 	bus_addr_t addr;
708 
709 	/*
710 	 * Map DMA registers
711 	 *
712 	 * Note that sc_dma_ok is the right variable to test to see if
713 	 * DMA can be done.  If the interface doesn't support DMA,
714 	 * sc_dma_ok will never be non-zero.  If the DMA regs couldn't
715 	 * be mapped, it'll be zero.  I.e., sc_dma_ok will only be
716 	 * non-zero if the interface supports DMA and the registers
717 	 * could be mapped.
718 	 *
719 	 * XXX Note that despite the fact that the Bus Master IDE specs
720 	 * XXX say that "The bus master IDE function uses 16 bytes of IO
721 	 * XXX space," some controllers (at least the United
722 	 * XXX Microelectronics UM8886BF) place it in memory space.
723 	 */
724 	maptype = pci_mapreg_type(pa->pa_pc, pa->pa_tag,
725 	    PCIIDE_REG_BUS_MASTER_DMA);
726 
727 	switch (maptype) {
728 	case PCI_MAPREG_TYPE_IO:
729 		sc->sc_dma_ok = (pci_mapreg_info(pa->pa_pc, pa->pa_tag,
730 		    PCIIDE_REG_BUS_MASTER_DMA, PCI_MAPREG_TYPE_IO,
731 		    &addr, NULL, NULL) == 0);
732 		if (sc->sc_dma_ok == 0) {
733 			printf(", but unused (couldn't query registers)");
734 			break;
735 		}
736 		if ((sc->sc_pp->ide_flags & IDE_16BIT_IOSPACE)
737 		    && addr >= 0x10000) {
738 			sc->sc_dma_ok = 0;
739 			printf(", but unused (registers at unsafe address %#lx)", (unsigned long)addr);
740 			break;
741 		}
742 		/* FALLTHROUGH */
743 
744 	case PCI_MAPREG_MEM_TYPE_32BIT:
745 		sc->sc_dma_ok = (pci_mapreg_map(pa,
746 		    PCIIDE_REG_BUS_MASTER_DMA, maptype, 0,
747 		    &sc->sc_dma_iot, &sc->sc_dma_ioh, NULL, NULL) == 0);
748 		sc->sc_dmat = pa->pa_dmat;
749 		if (sc->sc_dma_ok == 0) {
750 			printf(", but unused (couldn't map registers)");
751 		} else {
752 			sc->sc_wdcdev.dma_arg = sc;
753 			sc->sc_wdcdev.dma_init = pciide_dma_init;
754 			sc->sc_wdcdev.dma_start = pciide_dma_start;
755 			sc->sc_wdcdev.dma_finish = pciide_dma_finish;
756 		}
757 		break;
758 
759 	default:
760 		sc->sc_dma_ok = 0;
761 		printf(", but unsupported register maptype (0x%x)", maptype);
762 	}
763 }
764 
765 int
766 pciide_compat_intr(arg)
767 	void *arg;
768 {
769 	struct pciide_channel *cp = arg;
770 
771 #ifdef DIAGNOSTIC
772 	/* should only be called for a compat channel */
773 	if (cp->compat == 0)
774 		panic("pciide compat intr called for non-compat chan %p\n", cp);
775 #endif
776 	return (wdcintr(&cp->wdc_channel));
777 }
778 
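/*
 * Shared native-PCI interrupt handler: both channels interrupt through
 * the same line, so poll every non-compat channel that is waiting for
 * an interrupt and let wdcintr() decide whether it was the source.
 */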
779 int
780 pciide_pci_intr(arg)
781 	void *arg;
782 {
783 	struct pciide_softc *sc = arg;
784 	struct pciide_channel *cp;
785 	struct channel_softc *wdc_cp;
786 	int i, rv, crv;
787 
788 	rv = 0;
789 	for (i = 0; i < sc->sc_wdcdev.nchannels; i++) {
790 		cp = &sc->pciide_channels[i];
791 		wdc_cp = &cp->wdc_channel;
792 
793 		/* If this is a compat channel, skip it. */
794 		if (cp->compat)
795 			continue;
796 		/* if this channel is not waiting for an interrupt, skip it */
797 		if ((wdc_cp->ch_flags & WDCF_IRQ_WAIT) == 0)
798 			continue;
799 
800 		crv = wdcintr(wdc_cp);
801 		if (crv == 0)
802 			;		/* leave rv alone */
803 		else if (crv == 1)
804 			rv = 1;		/* claim the intr */
805 		else if (rv == 0)	/* crv should be -1 in this case */
806 			rv = crv;	/* if we've done no better, take it */
807 	}
808 	return (rv);
809 }
810 
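/*
 * Per-channel DMA preparation shared by the chip-specific setup
 * routines: clear the DMA/UDMA flags of any drive for which a DMA table
 * cannot be set up (or when the controller's DMA registers were not
 * mapped), so the rest of the driver falls back to PIO for it.
 */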
811 void
812 pciide_channel_dma_setup(cp)
813 	struct pciide_channel *cp;
814 {
815 	int drive;
816 	struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
817 	struct ata_drive_datas *drvp;
818 
819 	for (drive = 0; drive < 2; drive++) {
820 		drvp = &cp->wdc_channel.ch_drive[drive];
821 		/* If no drive, skip */
822 		if ((drvp->drive_flags & DRIVE) == 0)
823 			continue;
824 		/* setup DMA if needed */
825 		if (((drvp->drive_flags & DRIVE_DMA) == 0 &&
826 		    (drvp->drive_flags & DRIVE_UDMA) == 0) ||
827 		    sc->sc_dma_ok == 0) {
828 			drvp->drive_flags &= ~(DRIVE_DMA | DRIVE_UDMA);
829 			continue;
830 		}
831 		if (pciide_dma_table_setup(sc, cp->wdc_channel.channel, drive)
832 		    != 0) {
833 			/* Abort DMA setup */
834 			drvp->drive_flags &= ~(DRIVE_DMA | DRIVE_UDMA);
835 			continue;
836 		}
837 	}
838 }
839 
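/*
 * Allocate, map and load the physical region descriptor (PRD) table for
 * one drive, and create the DMA map later used for data transfers.
 * Each table entry holds a physical base address and a byte count; the
 * allocation is aligned and bounded by IDEDMA_TBL_ALIGN as required by
 * the bus-master IDE programming interface referenced at the top of
 * this file.
 */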
840 int
841 pciide_dma_table_setup(sc, channel, drive)
842 	struct pciide_softc *sc;
843 	int channel, drive;
844 {
845 	bus_dma_segment_t seg;
846 	int error, rseg;
847 	const bus_size_t dma_table_size =
848 	    sizeof(struct idedma_table) * NIDEDMA_TABLES;
849 	struct pciide_dma_maps *dma_maps =
850 	    &sc->pciide_channels[channel].dma_maps[drive];
851 
852 	/* If table was already allocated, just return */
853 	if (dma_maps->dma_table)
854 		return 0;
855 
856 	/* Allocate memory for the DMA tables and map it */
857 	if ((error = bus_dmamem_alloc(sc->sc_dmat, dma_table_size,
858 	    IDEDMA_TBL_ALIGN, IDEDMA_TBL_ALIGN, &seg, 1, &rseg,
859 	    BUS_DMA_NOWAIT)) != 0) {
860 		printf("%s:%d: unable to allocate table DMA for "
861 		    "drive %d, error=%d\n", sc->sc_wdcdev.sc_dev.dv_xname,
862 		    channel, drive, error);
863 		return error;
864 	}
865 	if ((error = bus_dmamem_map(sc->sc_dmat, &seg, rseg,
866 	    dma_table_size,
867 	    (caddr_t *)&dma_maps->dma_table,
868 	    BUS_DMA_NOWAIT|BUS_DMA_COHERENT)) != 0) {
869 		printf("%s:%d: unable to map table DMA for "
870 		    "drive %d, error=%d\n", sc->sc_wdcdev.sc_dev.dv_xname,
871 		    channel, drive, error);
872 		return error;
873 	}
874 	WDCDEBUG_PRINT(("pciide_dma_table_setup: table at %p len %lu, "
875 	    "phy 0x%lx\n", dma_maps->dma_table, (u_long)dma_table_size,
876 	    (unsigned long)seg.ds_addr), DEBUG_PROBE);
877 
878 	/* Create and load table DMA map for this disk */
879 	if ((error = bus_dmamap_create(sc->sc_dmat, dma_table_size,
880 	    1, dma_table_size, IDEDMA_TBL_ALIGN, BUS_DMA_NOWAIT,
881 	    &dma_maps->dmamap_table)) != 0) {
882 		printf("%s:%d: unable to create table DMA map for "
883 		    "drive %d, error=%d\n", sc->sc_wdcdev.sc_dev.dv_xname,
884 		    channel, drive, error);
885 		return error;
886 	}
887 	if ((error = bus_dmamap_load(sc->sc_dmat,
888 	    dma_maps->dmamap_table,
889 	    dma_maps->dma_table,
890 	    dma_table_size, NULL, BUS_DMA_NOWAIT)) != 0) {
891 		printf("%s:%d: unable to load table DMA map for "
892 		    "drive %d, error=%d\n", sc->sc_wdcdev.sc_dev.dv_xname,
893 		    channel, drive, error);
894 		return error;
895 	}
896 	WDCDEBUG_PRINT(("pciide_dma_table_setup: phy addr of table 0x%lx\n",
897 	    (unsigned long)dma_maps->dmamap_table->dm_segs[0].ds_addr),
898 	    DEBUG_PROBE);
899 	/* Create an xfer DMA map for this drive */
900 	if ((error = bus_dmamap_create(sc->sc_dmat, IDEDMA_BYTE_COUNT_MAX,
901 	    NIDEDMA_TABLES, IDEDMA_BYTE_COUNT_MAX, IDEDMA_BYTE_COUNT_ALIGN,
902 	    BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW,
903 	    &dma_maps->dmamap_xfer)) != 0) {
904 		printf("%s:%d: unable to create xfer DMA map for "
905 		    "drive %d, error=%d\n", sc->sc_wdcdev.sc_dev.dv_xname,
906 		    channel, drive, error);
907 		return error;
908 	}
909 	return 0;
910 }
911 
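/*
 * Prepare a DMA transfer: load the data buffer into the xfer DMA map,
 * translate its segments into PRD entries (marking the last one with
 * the end-of-table bit), sync both maps, then program the bus-master
 * registers with the table address and transfer direction.  The
 * IDEDMA_CMD_START bit is only set later, in pciide_dma_start().
 */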
912 int
913 pciide_dma_init(v, channel, drive, databuf, datalen, flags)
914 	void *v;
915 	int channel, drive;
916 	void *databuf;
917 	size_t datalen;
918 	int flags;
919 {
920 	struct pciide_softc *sc = v;
921 	int error, seg;
922 	struct pciide_dma_maps *dma_maps =
923 	    &sc->pciide_channels[channel].dma_maps[drive];
924 
925 	error = bus_dmamap_load(sc->sc_dmat,
926 	    dma_maps->dmamap_xfer,
927 	    databuf, datalen, NULL, BUS_DMA_NOWAIT);
928 	if (error) {
929 		printf("%s:%d: unable to load xfer DMA map for "
930 		    "drive %d, error=%d\n", sc->sc_wdcdev.sc_dev.dv_xname,
931 		    channel, drive, error);
932 		return error;
933 	}
934 
935 	bus_dmamap_sync(sc->sc_dmat, dma_maps->dmamap_xfer, 0,
936 	    dma_maps->dmamap_xfer->dm_mapsize,
937 	    (flags & WDC_DMA_READ) ?
938 	    BUS_DMASYNC_PREREAD : BUS_DMASYNC_PREWRITE);
939 
940 	for (seg = 0; seg < dma_maps->dmamap_xfer->dm_nsegs; seg++) {
941 #ifdef DIAGNOSTIC
942 		/* A segment must not cross a 64k boundary */
943 		{
944 		u_long phys = dma_maps->dmamap_xfer->dm_segs[seg].ds_addr;
945 		u_long len = dma_maps->dmamap_xfer->dm_segs[seg].ds_len;
946 		if ((phys & ~IDEDMA_BYTE_COUNT_MASK) !=
947 		    ((phys + len - 1) & ~IDEDMA_BYTE_COUNT_MASK)) {
948 			printf("pciide_dma: segment %d physical addr 0x%lx"
949 			    " len 0x%lx not properly aligned\n",
950 			    seg, phys, len);
951 			panic("pciide_dma: buf align");
952 		}
953 		}
954 #endif
955 		dma_maps->dma_table[seg].base_addr =
956 		    htole32(dma_maps->dmamap_xfer->dm_segs[seg].ds_addr);
957 		dma_maps->dma_table[seg].byte_count =
958 		    htole32(dma_maps->dmamap_xfer->dm_segs[seg].ds_len &
959 		    IDEDMA_BYTE_COUNT_MASK);
960 		WDCDEBUG_PRINT(("\t seg %d len %d addr 0x%x\n",
961 		   seg, le32toh(dma_maps->dma_table[seg].byte_count),
962 		   le32toh(dma_maps->dma_table[seg].base_addr)), DEBUG_DMA);
963 
964 	}
965 	dma_maps->dma_table[dma_maps->dmamap_xfer->dm_nsegs - 1].byte_count |=
966 	    htole32(IDEDMA_BYTE_COUNT_EOT);
967 
968 	bus_dmamap_sync(sc->sc_dmat, dma_maps->dmamap_table, 0,
969 	    dma_maps->dmamap_table->dm_mapsize,
970 	    BUS_DMASYNC_PREWRITE);
971 
972 	/* Maps are ready; program the bus-master DMA registers */
973 #ifdef DIAGNOSTIC
974 	if (dma_maps->dmamap_table->dm_segs[0].ds_addr & ~IDEDMA_TBL_MASK) {
975 		printf("pciide_dma_init: addr 0x%lx not properly aligned\n",
976 		    (u_long)dma_maps->dmamap_table->dm_segs[0].ds_addr);
977 		panic("pciide_dma_init: table align");
978 	}
979 #endif
980 
981 	/* Clear status bits */
982 	bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
983 	    IDEDMA_CTL + IDEDMA_SCH_OFFSET * channel,
984 	    bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh,
985 		IDEDMA_CTL + IDEDMA_SCH_OFFSET * channel));
986 	/* Write table addr */
987 	bus_space_write_4(sc->sc_dma_iot, sc->sc_dma_ioh,
988 	    IDEDMA_TBL + IDEDMA_SCH_OFFSET * channel,
989 	    dma_maps->dmamap_table->dm_segs[0].ds_addr);
990 	/* set read/write */
991 	bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
992 	    IDEDMA_CMD + IDEDMA_SCH_OFFSET * channel,
993 	    (flags & WDC_DMA_READ) ? IDEDMA_CMD_WRITE: 0);
994 	/* remember flags */
995 	dma_maps->dma_flags = flags;
996 	return 0;
997 }
998 
999 void
1000 pciide_dma_start(v, channel, drive)
1001 	void *v;
1002 	int channel, drive;
1003 {
1004 	struct pciide_softc *sc = v;
1005 
1006 	WDCDEBUG_PRINT(("pciide_dma_start\n"),DEBUG_XFERS);
1007 	bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
1008 	    IDEDMA_CMD + IDEDMA_SCH_OFFSET * channel,
1009 	    bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh,
1010 		IDEDMA_CMD + IDEDMA_SCH_OFFSET * channel) | IDEDMA_CMD_START);
1011 }
1012 
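/*
 * Complete (or, if "force" is set, abort) a DMA transfer: stop the
 * engine, unload the data buffer map and decode the status register.
 * IDEDMA_CTL_INTR means the transfer interrupted as expected,
 * IDEDMA_CTL_ERR flags a bus-master error, and IDEDMA_CTL_ACT still
 * being set indicates an underrun, which may be legitimate for ATAPI.
 */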
1013 int
1014 pciide_dma_finish(v, channel, drive, force)
1015 	void *v;
1016 	int channel, drive;
1017 	int force;
1018 {
1019 	struct pciide_softc *sc = v;
1020 	u_int8_t status;
1021 	int error = 0;
1022 	struct pciide_dma_maps *dma_maps =
1023 	    &sc->pciide_channels[channel].dma_maps[drive];
1024 
1025 	status = bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh,
1026 	    IDEDMA_CTL + IDEDMA_SCH_OFFSET * channel);
1027 	WDCDEBUG_PRINT(("pciide_dma_finish: status 0x%x\n", status),
1028 	    DEBUG_XFERS);
1029 
1030 	if (force == 0 && (status & IDEDMA_CTL_INTR) == 0)
1031 		return WDC_DMAST_NOIRQ;
1032 
1033 	/* stop DMA channel */
1034 	bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
1035 	    IDEDMA_CMD + IDEDMA_SCH_OFFSET * channel,
1036 	    bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh,
1037 		IDEDMA_CMD + IDEDMA_SCH_OFFSET * channel) & ~IDEDMA_CMD_START);
1038 
1039 	/* Unload the map of the data buffer */
1040 	bus_dmamap_sync(sc->sc_dmat, dma_maps->dmamap_xfer, 0,
1041 	    dma_maps->dmamap_xfer->dm_mapsize,
1042 	    (dma_maps->dma_flags & WDC_DMA_READ) ?
1043 	    BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE);
1044 	bus_dmamap_unload(sc->sc_dmat, dma_maps->dmamap_xfer);
1045 
1046 	if ((status & IDEDMA_CTL_ERR) != 0) {
1047 		printf("%s:%d:%d: bus-master DMA error: status=0x%x\n",
1048 		    sc->sc_wdcdev.sc_dev.dv_xname, channel, drive, status);
1049 		error |= WDC_DMAST_ERR;
1050 	}
1051 
1052 	if ((status & IDEDMA_CTL_INTR) == 0) {
1053 		printf("%s:%d:%d: bus-master DMA error: missing interrupt, "
1054 		    "status=0x%x\n", sc->sc_wdcdev.sc_dev.dv_xname, channel,
1055 		    drive, status);
1056 		error |= WDC_DMAST_NOIRQ;
1057 	}
1058 
1059 	if ((status & IDEDMA_CTL_ACT) != 0) {
1060 		/* data underrun, may be a valid condition for ATAPI */
1061 		error |= WDC_DMAST_UNDER;
1062 	}
1063 	return error;
1064 }
1065 
1066 void
1067 pciide_irqack(chp)
1068 	struct channel_softc *chp;
1069 {
1070 	struct pciide_channel *cp = (struct pciide_channel*)chp;
1071 	struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
1072 
1073 	/* clear status bits in IDE DMA registers */
1074 	bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
1075 	    IDEDMA_CTL + IDEDMA_SCH_OFFSET * chp->channel,
1076 	    bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh,
1077 		IDEDMA_CTL + IDEDMA_SCH_OFFSET * chp->channel));
1078 }
1079 
1080 /* some common code used by several chip_map */
1081 int
1082 pciide_chansetup(sc, channel, interface)
1083 	struct pciide_softc *sc;
1084 	int channel;
1085 	pcireg_t interface;
1086 {
1087 	struct pciide_channel *cp = &sc->pciide_channels[channel];
1088 	sc->wdc_chanarray[channel] = &cp->wdc_channel;
1089 	cp->name = PCIIDE_CHANNEL_NAME(channel);
1090 	cp->wdc_channel.channel = channel;
1091 	cp->wdc_channel.wdc = &sc->sc_wdcdev;
1092 	cp->wdc_channel.ch_queue =
1093 	    malloc(sizeof(struct channel_queue), M_DEVBUF, M_NOWAIT);
1094 	if (cp->wdc_channel.ch_queue == NULL) {
1095 		printf("%s %s channel: "
1096 		    "can't allocate memory for command queue\n",
1097 		    sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
1098 		return 0;
1099 	}
1100 	printf("%s: %s channel %s to %s mode\n",
1101 	    sc->sc_wdcdev.sc_dev.dv_xname, cp->name,
1102 	    (interface & PCIIDE_INTERFACE_SETTABLE(channel)) ?
1103 	    "configured" : "wired",
1104 	    (interface & PCIIDE_INTERFACE_PCI(channel)) ?
1105 	    "native-PCI" : "compatibility");
1106 	return 1;
1107 }
1108 
1109 /* some common code used by several chip channel_map */
1110 void
1111 pciide_mapchan(pa, cp, interface, cmdsizep, ctlsizep, pci_intr)
1112 	struct pci_attach_args *pa;
1113 	struct pciide_channel *cp;
1114 	pcireg_t interface;
1115 	bus_size_t *cmdsizep, *ctlsizep;
1116 	int (*pci_intr) __P((void *));
1117 {
1118 	struct channel_softc *wdc_cp = &cp->wdc_channel;
1119 
1120 	if (interface & PCIIDE_INTERFACE_PCI(wdc_cp->channel))
1121 		cp->hw_ok = pciide_mapregs_native(pa, cp, cmdsizep, ctlsizep,
1122 		    pci_intr);
1123 	else
1124 		cp->hw_ok = pciide_mapregs_compat(pa, cp,
1125 		    wdc_cp->channel, cmdsizep, ctlsizep);
1126 
1127 	if (cp->hw_ok == 0)
1128 		return;
1129 	wdc_cp->data32iot = wdc_cp->cmd_iot;
1130 	wdc_cp->data32ioh = wdc_cp->cmd_ioh;
1131 	wdcattach(wdc_cp);
1132 }
1133 
1134 /*
1135  * Generic code to call to determine whether a channel can be disabled.
1136  * Returns 1 if the channel can be disabled, 0 if not.
1137  */
1138 int
1139 pciide_chan_candisable(cp)
1140 	struct pciide_channel *cp;
1141 {
1142 	struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
1143 	struct channel_softc *wdc_cp = &cp->wdc_channel;
1144 
1145 	if ((wdc_cp->ch_drive[0].drive_flags & DRIVE) == 0 &&
1146 	    (wdc_cp->ch_drive[1].drive_flags & DRIVE) == 0) {
1147 		printf("%s: disabling %s channel (no drives)\n",
1148 		    sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
1149 		cp->hw_ok = 0;
1150 		return 1;
1151 	}
1152 	return 0;
1153 }
1154 
1155 /*
1156  * Generic code to map the compatibility interrupt if hw_ok=1 and this is
1157  * a compat channel. Sets hw_ok=0 on failure.
1158  */
1159 void
1160 pciide_map_compat_intr(pa, cp, compatchan, interface)
1161 	struct pci_attach_args *pa;
1162 	struct pciide_channel *cp;
1163 	int compatchan, interface;
1164 {
1165 	struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
1166 	struct channel_softc *wdc_cp = &cp->wdc_channel;
1167 
1168 	if (cp->hw_ok == 0)
1169 		return;
1170 	if ((interface & PCIIDE_INTERFACE_PCI(wdc_cp->channel)) != 0)
1171 		return;
1172 
1173 	cp->ih = pciide_machdep_compat_intr_establish(&sc->sc_wdcdev.sc_dev,
1174 	    pa, compatchan, pciide_compat_intr, cp);
1175 	if (cp->ih == NULL) {
1176 		printf("%s: no compatibility interrupt for use by %s "
1177 		    "channel\n", sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
1178 		cp->hw_ok = 0;
1179 	}
1180 }
1181 
1182 void
1183 pciide_print_modes(cp)
1184 	struct pciide_channel *cp;
1185 {
1186 	wdc_print_modes(&cp->wdc_channel);
1187 }
1188 
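/*
 * Generic chip_map for controllers without specific support: map each
 * channel, probe it (and make sure it really belongs to this controller
 * by re-probing with I/O space decoding disabled), then, when enabled,
 * use bus-master DMA without any controller-specific timing setup.
 */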
1189 void
1190 default_chip_map(sc, pa)
1191 	struct pciide_softc *sc;
1192 	struct pci_attach_args *pa;
1193 {
1194 	struct pciide_channel *cp;
1195 	pcireg_t interface = PCI_INTERFACE(pa->pa_class);
1196 	pcireg_t csr;
1197 	int channel, drive;
1198 	struct ata_drive_datas *drvp;
1199 	u_int8_t idedma_ctl;
1200 	bus_size_t cmdsize, ctlsize;
1201 	char *failreason;
1202 
1203 	if (pciide_chipen(sc, pa) == 0)
1204 		return;
1205 
1206 	if (interface & PCIIDE_INTERFACE_BUS_MASTER_DMA) {
1207 		printf("%s: bus-master DMA support present",
1208 		    sc->sc_wdcdev.sc_dev.dv_xname);
1209 		if (sc->sc_pp == &default_product_desc &&
1210 		    (sc->sc_wdcdev.sc_dev.dv_cfdata->cf_flags &
1211 		    PCIIDE_OPTIONS_DMA) == 0) {
1212 			printf(", but unused (no driver support)");
1213 			sc->sc_dma_ok = 0;
1214 		} else {
1215 			pciide_mapreg_dma(sc, pa);
1216 			if (sc->sc_dma_ok != 0)
1217 				printf(", used without full driver "
1218 				    "support");
1219 		}
1220 	} else {
1221 		printf("%s: hardware does not support DMA",
1222 		    sc->sc_wdcdev.sc_dev.dv_xname);
1223 		sc->sc_dma_ok = 0;
1224 	}
1225 	printf("\n");
1226 	if (sc->sc_dma_ok) {
1227 		sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_IRQACK;
1228 		sc->sc_wdcdev.irqack = pciide_irqack;
1229 	}
1230 	sc->sc_wdcdev.PIO_cap = 0;
1231 	sc->sc_wdcdev.DMA_cap = 0;
1232 
1233 	sc->sc_wdcdev.channels = sc->wdc_chanarray;
1234 	sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
1235 	sc->sc_wdcdev.cap |= WDC_CAPABILITY_DATA16;
1236 
1237 	for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
1238 		cp = &sc->pciide_channels[channel];
1239 		if (pciide_chansetup(sc, channel, interface) == 0)
1240 			continue;
1241 		if (interface & PCIIDE_INTERFACE_PCI(channel)) {
1242 			cp->hw_ok = pciide_mapregs_native(pa, cp, &cmdsize,
1243 			    &ctlsize, pciide_pci_intr);
1244 		} else {
1245 			cp->hw_ok = pciide_mapregs_compat(pa, cp,
1246 			    channel, &cmdsize, &ctlsize);
1247 		}
1248 		if (cp->hw_ok == 0)
1249 			continue;
1250 		/*
1251 		 * Check to see if something appears to be there.
1252 		 */
1253 		failreason = NULL;
1254 		if (!wdcprobe(&cp->wdc_channel)) {
1255 			failreason = "not responding; disabled or no drives?";
1256 			goto next;
1257 		}
1258 		/*
1259 		 * Now, make sure it's actually attributable to this PCI IDE
1260 		 * channel by trying to access the channel again while the
1261 		 * PCI IDE controller's I/O space is disabled.  (If the
1262 		 * channel no longer appears to be there, it belongs to
1263 		 * this controller.)  YUCK!
1264 		 */
1265 		csr = pci_conf_read(sc->sc_pc, sc->sc_tag,
1266 		    PCI_COMMAND_STATUS_REG);
1267 		pci_conf_write(sc->sc_pc, sc->sc_tag, PCI_COMMAND_STATUS_REG,
1268 		    csr & ~PCI_COMMAND_IO_ENABLE);
1269 		if (wdcprobe(&cp->wdc_channel))
1270 			failreason = "other hardware responding at addresses";
1271 		pci_conf_write(sc->sc_pc, sc->sc_tag,
1272 		    PCI_COMMAND_STATUS_REG, csr);
1273 next:
1274 		if (failreason) {
1275 			printf("%s: %s channel ignored (%s)\n",
1276 			    sc->sc_wdcdev.sc_dev.dv_xname, cp->name,
1277 			    failreason);
1278 			cp->hw_ok = 0;
1279 			bus_space_unmap(cp->wdc_channel.cmd_iot,
1280 			    cp->wdc_channel.cmd_ioh, cmdsize);
1281 			bus_space_unmap(cp->wdc_channel.ctl_iot,
1282 			    cp->wdc_channel.ctl_ioh, ctlsize);
1283 		} else {
1284 			pciide_map_compat_intr(pa, cp, channel, interface);
1285 		}
1286 		if (cp->hw_ok) {
1287 			cp->wdc_channel.data32iot = cp->wdc_channel.cmd_iot;
1288 			cp->wdc_channel.data32ioh = cp->wdc_channel.cmd_ioh;
1289 			wdcattach(&cp->wdc_channel);
1290 		}
1291 	}
1292 
1293 	if (sc->sc_dma_ok == 0)
1294 		return;
1295 
1296 	/* Allocate DMA maps */
1297 	for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
1298 		idedma_ctl = 0;
1299 		cp = &sc->pciide_channels[channel];
1300 		for (drive = 0; drive < 2; drive++) {
1301 			drvp = &cp->wdc_channel.ch_drive[drive];
1302 			/* If no drive, skip */
1303 			if ((drvp->drive_flags & DRIVE) == 0)
1304 				continue;
1305 			if ((drvp->drive_flags & DRIVE_DMA) == 0)
1306 				continue;
1307 			if (pciide_dma_table_setup(sc, channel, drive) != 0) {
1308 				/* Abort DMA setup */
1309 				printf("%s:%d:%d: can't allocate DMA maps, "
1310 				    "using PIO transfers\n",
1311 				    sc->sc_wdcdev.sc_dev.dv_xname,
1312 				    channel, drive);
1313 				drvp->drive_flags &= ~DRIVE_DMA;
				continue;
1314 			}
1315 			printf("%s:%d:%d: using DMA data transfers\n",
1316 			    sc->sc_wdcdev.sc_dev.dv_xname,
1317 			    channel, drive);
1318 			idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
1319 		}
1320 		if (idedma_ctl != 0) {
1321 			/* Add software bits in status register */
1322 			bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
1323 			    IDEDMA_CTL + (IDEDMA_SCH_OFFSET * channel),
1324 			    idedma_ctl);
1325 		}
1326 	}
1327 }
1328 
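/*
 * chip_map for the Intel PIIX/ICH family.  All of these present their
 * channels in compatibility mode only; DMA/UDMA capability and the
 * set_modes hook depend on the exact product (the original PIIX has a
 * single shared timing register, while PIIX3/4 and the ICHs add slave
 * and Ultra-DMA timing registers).
 */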
1329 void
1330 piix_chip_map(sc, pa)
1331 	struct pciide_softc *sc;
1332 	struct pci_attach_args *pa;
1333 {
1334 	struct pciide_channel *cp;
1335 	int channel;
1336 	u_int32_t idetim;
1337 	bus_size_t cmdsize, ctlsize;
1338 
1339 	if (pciide_chipen(sc, pa) == 0)
1340 		return;
1341 
1342 	printf("%s: bus-master DMA support present",
1343 	    sc->sc_wdcdev.sc_dev.dv_xname);
1344 	pciide_mapreg_dma(sc, pa);
1345 	printf("\n");
1346 	sc->sc_wdcdev.cap |= WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
1347 	    WDC_CAPABILITY_MODE;
1348 	if (sc->sc_dma_ok) {
1349 		sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_IRQACK;
1350 		sc->sc_wdcdev.irqack = pciide_irqack;
1351 		switch(sc->sc_pp->ide_product) {
1352 		case PCI_PRODUCT_INTEL_82371AB_IDE:
1353 		case PCI_PRODUCT_INTEL_82440MX_IDE:
1354 		case PCI_PRODUCT_INTEL_82801AA_IDE:
1355 		case PCI_PRODUCT_INTEL_82801AB_IDE:
1356 		case PCI_PRODUCT_INTEL_82801BA_IDE:
1357 		case PCI_PRODUCT_INTEL_82801BAM_IDE:
1358 			sc->sc_wdcdev.cap |= WDC_CAPABILITY_UDMA;
1359 		}
1360 	}
1361 	sc->sc_wdcdev.PIO_cap = 4;
1362 	sc->sc_wdcdev.DMA_cap = 2;
1363 	switch(sc->sc_pp->ide_product) {
1364 	case PCI_PRODUCT_INTEL_82801AA_IDE:
1365 		sc->sc_wdcdev.UDMA_cap = 4;
1366 		break;
1367 	case PCI_PRODUCT_INTEL_82801BA_IDE:
1368 	case PCI_PRODUCT_INTEL_82801BAM_IDE:
1369 		sc->sc_wdcdev.UDMA_cap = 5;
1370 		break;
1371 	default:
1372 		sc->sc_wdcdev.UDMA_cap = 2;
1373 	}
1374 	if (sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82371FB_IDE)
1375 		sc->sc_wdcdev.set_modes = piix_setup_channel;
1376 	else
1377 		sc->sc_wdcdev.set_modes = piix3_4_setup_channel;
1378 	sc->sc_wdcdev.channels = sc->wdc_chanarray;
1379 	sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
1380 
1381 	WDCDEBUG_PRINT(("piix_setup_chip: old idetim=0x%x",
1382 	    pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_IDETIM)),
1383 	    DEBUG_PROBE);
1384 	if (sc->sc_pp->ide_product != PCI_PRODUCT_INTEL_82371FB_IDE) {
1385 		WDCDEBUG_PRINT((", sidetim=0x%x",
1386 		    pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_SIDETIM)),
1387 		    DEBUG_PROBE);
1388 		if (sc->sc_wdcdev.cap & WDC_CAPABILITY_UDMA) {
1389 			WDCDEBUG_PRINT((", udmareg 0x%x",
1390 			    pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_UDMAREG)),
1391 			    DEBUG_PROBE);
1392 		}
1393 		if (sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801AA_IDE ||
1394 		    sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801AB_IDE ||
1395 		    sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801BA_IDE ||
1396 		    sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801BAM_IDE) {
1397 			WDCDEBUG_PRINT((", IDE_CONTROL 0x%x",
1398 			    pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_CONFIG)),
1399 			    DEBUG_PROBE);
1400 		}
1401 
1402 	}
1403 	WDCDEBUG_PRINT(("\n"), DEBUG_PROBE);
1404 
1405 	for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
1406 		cp = &sc->pciide_channels[channel];
1407 		/* PIIX is compat-only */
1408 		if (pciide_chansetup(sc, channel, 0) == 0)
1409 			continue;
1410 		idetim = pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_IDETIM);
1411 		if ((PIIX_IDETIM_READ(idetim, channel) &
1412 		    PIIX_IDETIM_IDE) == 0) {
1413 			printf("%s: %s channel ignored (disabled)\n",
1414 			    sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
1415 			continue;
1416 		}
1417 		/* PIIX are compat-only pciide devices */
1418 		pciide_mapchan(pa, cp, 0, &cmdsize, &ctlsize, pciide_pci_intr);
1419 		if (cp->hw_ok == 0)
1420 			continue;
1421 		if (pciide_chan_candisable(cp)) {
1422 			idetim = PIIX_IDETIM_CLEAR(idetim, PIIX_IDETIM_IDE,
1423 			    channel);
1424 			pci_conf_write(sc->sc_pc, sc->sc_tag, PIIX_IDETIM,
1425 			    idetim);
1426 		}
1427 		pciide_map_compat_intr(pa, cp, channel, 0);
1428 		if (cp->hw_ok == 0)
1429 			continue;
1430 		sc->sc_wdcdev.set_modes(&cp->wdc_channel);
1431 	}
1432 
1433 	WDCDEBUG_PRINT(("piix_setup_chip: idetim=0x%x",
1434 	    pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_IDETIM)),
1435 	    DEBUG_PROBE);
1436 	if (sc->sc_pp->ide_product != PCI_PRODUCT_INTEL_82371FB_IDE) {
1437 		WDCDEBUG_PRINT((", sidetim=0x%x",
1438 		    pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_SIDETIM)),
1439 		    DEBUG_PROBE);
1440 		if (sc->sc_wdcdev.cap & WDC_CAPABILITY_UDMA) {
1441 			WDCDEBUG_PRINT((", udmareg 0x%x",
1442 			    pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_UDMAREG)),
1443 			    DEBUG_PROBE);
1444 		}
1445 		if (sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801AA_IDE ||
1446 		    sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801AB_IDE ||
1447 		    sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801BA_IDE ||
1448 		    sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801BAM_IDE) {
1449 			WDCDEBUG_PRINT((", IDE_CONTROL 0x%x",
1450 			    pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_CONFIG)),
1451 			    DEBUG_PROBE);
1452 		}
1453 	}
1454 	WDCDEBUG_PRINT(("\n"), DEBUG_PROBE);
1455 }
1456 
1457 void
1458 piix_setup_channel(chp)
1459 	struct channel_softc *chp;
1460 {
1461 	u_int8_t mode[2], drive;
1462 	u_int32_t oidetim, idetim, idedma_ctl;
1463 	struct pciide_channel *cp = (struct pciide_channel*)chp;
1464 	struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
1465 	struct ata_drive_datas *drvp = cp->wdc_channel.ch_drive;
1466 
1467 	oidetim = pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_IDETIM);
1468 	idetim = PIIX_IDETIM_CLEAR(oidetim, 0xffff, chp->channel);
1469 	idedma_ctl = 0;
1470 
1471 	/* set up new idetim: Enable IDE registers decode */
1472 	idetim = PIIX_IDETIM_SET(idetim, PIIX_IDETIM_IDE,
1473 	    chp->channel);
1474 
1475 	/* setup DMA */
1476 	pciide_channel_dma_setup(cp);
1477 
1478 	/*
1479 	 * Here we have to mess with the drives' modes: the PIIX can't have
1480 	 * different timings for master and slave drives.
1481 	 * We need to find the best combination.
1482 	 */
1483 
1484 	/* If both drives support DMA, take the lower mode */
1485 	if ((drvp[0].drive_flags & DRIVE_DMA) &&
1486 	    (drvp[1].drive_flags & DRIVE_DMA)) {
1487 		mode[0] = mode[1] =
1488 		    min(drvp[0].DMA_mode, drvp[1].DMA_mode);
1489 		drvp[0].DMA_mode = mode[0];
1490 		drvp[1].DMA_mode = mode[1];
1491 		goto ok;
1492 	}
1493 	/*
1494 	 * If only one drive supports DMA, use its mode, and
1495 	 * put the other one in PIO mode 0 if its mode is not compatible
1496 	 */
1497 	if (drvp[0].drive_flags & DRIVE_DMA) {
1498 		mode[0] = drvp[0].DMA_mode;
1499 		mode[1] = drvp[1].PIO_mode;
1500 		if (piix_isp_pio[mode[1]] != piix_isp_dma[mode[0]] ||
1501 		    piix_rtc_pio[mode[1]] != piix_rtc_dma[mode[0]])
1502 			mode[1] = drvp[1].PIO_mode = 0;
1503 		goto ok;
1504 	}
1505 	if (drvp[1].drive_flags & DRIVE_DMA) {
1506 		mode[1] = drvp[1].DMA_mode;
1507 		mode[0] = drvp[0].PIO_mode;
1508 		if (piix_isp_pio[mode[0]] != piix_isp_dma[mode[1]] ||
1509 		    piix_rtc_pio[mode[0]] != piix_rtc_dma[mode[1]])
1510 			mode[0] = drvp[0].PIO_mode = 0;
1511 		goto ok;
1512 	}
1513 	/*
1514 	 * If neither drive uses DMA, take the lower mode, unless
1515 	 * one of them is below PIO mode 2.
1516 	 */
1517 	if (drvp[0].PIO_mode < 2) {
1518 		mode[0] = drvp[0].PIO_mode = 0;
1519 		mode[1] = drvp[1].PIO_mode;
1520 	} else if (drvp[1].PIO_mode < 2) {
1521 		mode[1] = drvp[1].PIO_mode = 0;
1522 		mode[0] = drvp[0].PIO_mode;
1523 	} else {
1524 		mode[0] = mode[1] =
1525 		    min(drvp[1].PIO_mode, drvp[0].PIO_mode);
1526 		drvp[0].PIO_mode = mode[0];
1527 		drvp[1].PIO_mode = mode[1];
1528 	}
1529 ok:	/* The modes are setup */
1530 	for (drive = 0; drive < 2; drive++) {
1531 		if (drvp[drive].drive_flags & DRIVE_DMA) {
1532 			idetim |= piix_setup_idetim_timings(
1533 			    mode[drive], 1, chp->channel);
1534 			goto end;
1535 		}
1536 	}
1537 	/* If we get here, neither drive uses DMA */
1538 	if (mode[0] >= 2)
1539 		idetim |= piix_setup_idetim_timings(
1540 		    mode[0], 0, chp->channel);
1541 	else
1542 		idetim |= piix_setup_idetim_timings(
1543 		    mode[1], 0, chp->channel);
1544 end:	/*
1545 	 * timing mode is now set up in the controller. Enable
1546 	 * it per-drive
1547 	 */
1548 	for (drive = 0; drive < 2; drive++) {
1549 		/* If no drive, skip */
1550 		if ((drvp[drive].drive_flags & DRIVE) == 0)
1551 			continue;
1552 		idetim |= piix_setup_idetim_drvs(&drvp[drive]);
1553 		if (drvp[drive].drive_flags & DRIVE_DMA)
1554 			idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
1555 	}
1556 	if (idedma_ctl != 0) {
1557 		/* Add software bits in status register */
1558 		bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
1559 		    IDEDMA_CTL + (IDEDMA_SCH_OFFSET * chp->channel),
1560 		    idedma_ctl);
1561 	}
1562 	pci_conf_write(sc->sc_pc, sc->sc_tag, PIIX_IDETIM, idetim);
1563 	pciide_print_modes(cp);
1564 }
1565 
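/*
 * Channel setup for PIIX3/PIIX4/ICH: unlike the original PIIX, these
 * have a slave timing register (SIDETIM) and, where UDMA is supported,
 * per-drive Ultra-DMA control (UDMAREG) plus cable/mode bits in
 * PIIX_CONFIG, so master and slave timings can be programmed
 * independently.
 */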
1566 void
1567 piix3_4_setup_channel(chp)
1568 	struct channel_softc *chp;
1569 {
1570 	struct ata_drive_datas *drvp;
1571 	u_int32_t oidetim, idetim, sidetim, udmareg, ideconf, idedma_ctl;
1572 	struct pciide_channel *cp = (struct pciide_channel*)chp;
1573 	struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
1574 	int drive;
1575 	int channel = chp->channel;
1576 
1577 	oidetim = pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_IDETIM);
1578 	sidetim = pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_SIDETIM);
1579 	udmareg = pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_UDMAREG);
1580 	ideconf = pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_CONFIG);
1581 	idetim = PIIX_IDETIM_CLEAR(oidetim, 0xffff, channel);
1582 	sidetim &= ~(PIIX_SIDETIM_ISP_MASK(channel) |
1583 	    PIIX_SIDETIM_RTC_MASK(channel));
1584 
1585 	idedma_ctl = 0;
1586 	/* If channel disabled, no need to go further */
1587 	if ((PIIX_IDETIM_READ(oidetim, channel) & PIIX_IDETIM_IDE) == 0)
1588 		return;
1589 	/* set up new idetim: Enable IDE registers decode */
1590 	idetim = PIIX_IDETIM_SET(idetim, PIIX_IDETIM_IDE, channel);
1591 
1592 	/* setup DMA if needed */
1593 	pciide_channel_dma_setup(cp);
1594 
1595 	for (drive = 0; drive < 2; drive++) {
1596 		udmareg &= ~(PIIX_UDMACTL_DRV_EN(channel, drive) |
1597 		    PIIX_UDMATIM_SET(0x3, channel, drive));
1598 		drvp = &chp->ch_drive[drive];
1599 		/* If no drive, skip */
1600 		if ((drvp->drive_flags & DRIVE) == 0)
1601 			continue;
1602 		if (((drvp->drive_flags & DRIVE_DMA) == 0 &&
1603 		    (drvp->drive_flags & DRIVE_UDMA) == 0))
1604 			goto pio;
1605 
1606 		if (sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801AA_IDE ||
1607 		    sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801AB_IDE ||
1608 		    sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801BA_IDE ||
1609 		    sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801BAM_IDE) {
1610 			ideconf |= PIIX_CONFIG_PINGPONG;
1611 		}
1612 		if (sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801BA_IDE ||
1613 		    sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801BAM_IDE) {
1614 			/* setup Ultra/100 */
1615 			if (drvp->UDMA_mode > 2 &&
1616 			    (ideconf & PIIX_CONFIG_CR(channel, drive)) == 0)
1617 				drvp->UDMA_mode = 2;
1618 			if (drvp->UDMA_mode > 4) {
1619 				ideconf |= PIIX_CONFIG_UDMA100(channel, drive);
1620 			} else {
1621 				ideconf &= ~PIIX_CONFIG_UDMA100(channel, drive);
1622 				if (drvp->UDMA_mode > 2) {
1623 					ideconf |= PIIX_CONFIG_UDMA66(channel,
1624 					    drive);
1625 				} else {
1626 					ideconf &= ~PIIX_CONFIG_UDMA66(channel,
1627 					    drive);
1628 				}
1629 			}
1630 		}
1631 		if (sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801AA_IDE) {
1632 			/* setup Ultra/66 */
1633 			if (drvp->UDMA_mode > 2 &&
1634 			    (ideconf & PIIX_CONFIG_CR(channel, drive)) == 0)
1635 				drvp->UDMA_mode = 2;
1636 			if (drvp->UDMA_mode > 2)
1637 				ideconf |= PIIX_CONFIG_UDMA66(channel, drive);
1638 			else
1639 				ideconf &= ~PIIX_CONFIG_UDMA66(channel, drive);
1640 		}
1641 		if ((chp->wdc->cap & WDC_CAPABILITY_UDMA) &&
1642 		    (drvp->drive_flags & DRIVE_UDMA)) {
1643 			/* use Ultra/DMA */
1644 			drvp->drive_flags &= ~DRIVE_DMA;
1645 			udmareg |= PIIX_UDMACTL_DRV_EN(channel, drive);
1646 			udmareg |= PIIX_UDMATIM_SET(
1647 			    piix4_sct_udma[drvp->UDMA_mode], channel, drive);
1648 		} else {
1649 			/* use Multiword DMA */
1650 			drvp->drive_flags &= ~DRIVE_UDMA;
1651 			if (drive == 0) {
1652 				idetim |= piix_setup_idetim_timings(
1653 				    drvp->DMA_mode, 1, channel);
1654 			} else {
1655 				sidetim |= piix_setup_sidetim_timings(
1656 					drvp->DMA_mode, 1, channel);
1657 				idetim = PIIX_IDETIM_SET(idetim,
1658 				    PIIX_IDETIM_SITRE, channel);
1659 			}
1660 		}
1661 		idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
1662 
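		/* Fall through: also program PIO timings for DMA drives. */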
1663 pio:		/* use PIO mode */
1664 		idetim |= piix_setup_idetim_drvs(drvp);
1665 		if (drive == 0) {
1666 			idetim |= piix_setup_idetim_timings(
1667 			    drvp->PIO_mode, 0, channel);
1668 		} else {
1669 			sidetim |= piix_setup_sidetim_timings(
1670 				drvp->PIO_mode, 0, channel);
1671 			idetim = PIIX_IDETIM_SET(idetim,
1672 			    PIIX_IDETIM_SITRE, channel);
1673 		}
1674 	}
1675 	if (idedma_ctl != 0) {
1676 		/* Add software bits in status register */
1677 		bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
1678 		    IDEDMA_CTL + (IDEDMA_SCH_OFFSET * channel),
1679 		    idedma_ctl);
1680 	}
1681 	pci_conf_write(sc->sc_pc, sc->sc_tag, PIIX_IDETIM, idetim);
1682 	pci_conf_write(sc->sc_pc, sc->sc_tag, PIIX_SIDETIM, sidetim);
1683 	pci_conf_write(sc->sc_pc, sc->sc_tag, PIIX_UDMAREG, udmareg);
1684 	pci_conf_write(sc->sc_pc, sc->sc_tag, PIIX_CONFIG, ideconf);
1685 	pciide_print_modes(cp);
1686 }
1687 
1688 
1689 /* setup ISP and RTC fields, based on mode */
1690 static u_int32_t
1691 piix_setup_idetim_timings(mode, dma, channel)
1692 	u_int8_t mode;
1693 	u_int8_t dma;
1694 	u_int8_t channel;
1695 {
1696 
1697 	if (dma)
1698 		return PIIX_IDETIM_SET(0,
1699 		    PIIX_IDETIM_ISP_SET(piix_isp_dma[mode]) |
1700 		    PIIX_IDETIM_RTC_SET(piix_rtc_dma[mode]),
1701 		    channel);
1702 	else
1703 		return PIIX_IDETIM_SET(0,
1704 		    PIIX_IDETIM_ISP_SET(piix_isp_pio[mode]) |
1705 		    PIIX_IDETIM_RTC_SET(piix_rtc_pio[mode]),
1706 		    channel);
1707 }
1708 
1709 /* setup DTE, PPE, IE and TIME field based on PIO mode */
1710 static u_int32_t
1711 piix_setup_idetim_drvs(drvp)
1712 	struct ata_drive_datas *drvp;
1713 {
1714 	u_int32_t ret = 0;
1715 	struct channel_softc *chp = drvp->chnl_softc;
1716 	u_int8_t channel = chp->channel;
1717 	u_int8_t drive = drvp->drive;
1718 
1719 	/*
1720 	 * If the drive is using UDMA, the timing setup is independent,
1721 	 * so just check DMA and PIO here.
1722 	 */
1723 	if (drvp->drive_flags & DRIVE_DMA) {
1724 		/* if mode = DMA mode 0, use compatible timings */
1725 		if ((drvp->drive_flags & DRIVE_DMA) &&
1726 		    drvp->DMA_mode == 0) {
1727 			drvp->PIO_mode = 0;
1728 			return ret;
1729 		}
1730 		ret = PIIX_IDETIM_SET(ret, PIIX_IDETIM_TIME(drive), channel);
1731 		/*
1732 		 * If PIO and DMA timings are the same, use fast timings for
1733 		 * PIO too; otherwise fall back to compat timings for PIO.
1734 		 */
1735 		if ((piix_isp_pio[drvp->PIO_mode] !=
1736 		    piix_isp_dma[drvp->DMA_mode]) ||
1737 		    (piix_rtc_pio[drvp->PIO_mode] !=
1738 		    piix_rtc_dma[drvp->DMA_mode]))
1739 			drvp->PIO_mode = 0;
1740 		/* if PIO mode <= 2, use compat timings for PIO */
1741 		if (drvp->PIO_mode <= 2) {
1742 			ret = PIIX_IDETIM_SET(ret, PIIX_IDETIM_DTE(drive),
1743 			    channel);
1744 			return ret;
1745 		}
1746 	}
1747 
1748 	/*
1749 	 * Now setup PIO modes. If mode < 2, use compat timings.
1750 	 * Else enable fast timings. Enable IORDY and prefetch/post
1751 	 * if PIO mode >= 3.
1752 	 */
1753 
1754 	if (drvp->PIO_mode < 2)
1755 		return ret;
1756 
1757 	ret = PIIX_IDETIM_SET(ret, PIIX_IDETIM_TIME(drive), channel);
1758 	if (drvp->PIO_mode >= 3) {
1759 		ret = PIIX_IDETIM_SET(ret, PIIX_IDETIM_IE(drive), channel);
1760 		ret = PIIX_IDETIM_SET(ret, PIIX_IDETIM_PPE(drive), channel);
1761 	}
1762 	return ret;
1763 }
1764 
1765 /* setup values in SIDETIM registers, based on mode */
1766 static u_int32_t
1767 piix_setup_sidetim_timings(mode, dma, channel)
1768 	u_int8_t mode;
1769 	u_int8_t dma;
1770 	u_int8_t channel;
1771 {
1772 	if (dma)
1773 		return PIIX_SIDETIM_ISP_SET(piix_isp_dma[mode], channel) |
1774 		    PIIX_SIDETIM_RTC_SET(piix_rtc_dma[mode], channel);
1775 	else
1776 		return PIIX_SIDETIM_ISP_SET(piix_isp_pio[mode], channel) |
1777 		    PIIX_SIDETIM_RTC_SET(piix_rtc_pio[mode], channel);
1778 }
1779 
1780 void
1781 amd756_chip_map(sc, pa)
1782 	struct pciide_softc *sc;
1783 	struct pci_attach_args *pa;
1784 {
1785 	struct pciide_channel *cp;
1786 	pcireg_t interface = PCI_INTERFACE(pa->pa_class);
1787 	int channel;
1788 	pcireg_t chanenable;
1789 	bus_size_t cmdsize, ctlsize;
1790 
1791 	if (pciide_chipen(sc, pa) == 0)
1792 		return;
1793 	printf("%s: bus-master DMA support present",
1794 	    sc->sc_wdcdev.sc_dev.dv_xname);
1795 	pciide_mapreg_dma(sc, pa);
1796 	printf("\n");
1797 	sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
1798 	    WDC_CAPABILITY_MODE;
1799 	if (sc->sc_dma_ok) {
1800 		sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_UDMA;
1801 		sc->sc_wdcdev.cap |= WDC_CAPABILITY_IRQACK;
1802 		sc->sc_wdcdev.irqack = pciide_irqack;
1803 	}
1804 	sc->sc_wdcdev.PIO_cap = 4;
1805 	sc->sc_wdcdev.DMA_cap = 2;
1806 	sc->sc_wdcdev.UDMA_cap = 4;
1807 	sc->sc_wdcdev.set_modes = amd756_setup_channel;
1808 	sc->sc_wdcdev.channels = sc->wdc_chanarray;
1809 	sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
1810 	chanenable = pci_conf_read(sc->sc_pc, sc->sc_tag, AMD756_CHANSTATUS_EN);
1811 
1812 	WDCDEBUG_PRINT(("amd756_chip_map: Channel enable=0x%x\n", chanenable),
1813 	    DEBUG_PROBE);
1814 	for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
1815 		cp = &sc->pciide_channels[channel];
1816 		if (pciide_chansetup(sc, channel, interface) == 0)
1817 			continue;
1818 
1819 		if ((chanenable & AMD756_CHAN_EN(channel)) == 0) {
1820 			printf("%s: %s channel ignored (disabled)\n",
1821 			    sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
1822 			continue;
1823 		}
1824 		pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize,
1825 		    pciide_pci_intr);
1826 
1827 		if (pciide_chan_candisable(cp))
1828 			chanenable &= ~AMD756_CHAN_EN(channel);
1829 		pciide_map_compat_intr(pa, cp, channel, interface);
1830 		if (cp->hw_ok == 0)
1831 			continue;
1832 
1833 		amd756_setup_channel(&cp->wdc_channel);
1834 	}
1835 	pci_conf_write(sc->sc_pc, sc->sc_tag, AMD756_CHANSTATUS_EN,
1836 	    chanenable);
1837 	return;
1838 }
1839 
1840 void
1841 amd756_setup_channel(chp)
1842 	struct channel_softc *chp;
1843 {
1844 	u_int32_t udmatim_reg, datatim_reg;
1845 	u_int8_t idedma_ctl;
1846 	int mode, drive;
1847 	struct ata_drive_datas *drvp;
1848 	struct pciide_channel *cp = (struct pciide_channel*)chp;
1849 	struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
1850 #ifndef PCIIDE_AMD756_ENABLEDMA
1851 	int rev = PCI_REVISION(
1852 	    pci_conf_read(sc->sc_pc, sc->sc_tag, PCI_CLASS_REG));
1853 #endif
1854 
1855 	idedma_ctl = 0;
1856 	datatim_reg = pci_conf_read(sc->sc_pc, sc->sc_tag, AMD756_DATATIM);
1857 	udmatim_reg = pci_conf_read(sc->sc_pc, sc->sc_tag, AMD756_UDMA);
1858 	datatim_reg &= ~AMD756_DATATIM_MASK(chp->channel);
1859 	udmatim_reg &= ~AMD756_UDMA_MASK(chp->channel);
1860 
1861 	/* setup DMA if needed */
1862 	pciide_channel_dma_setup(cp);
1863 
1864 	for (drive = 0; drive < 2; drive++) {
1865 		drvp = &chp->ch_drive[drive];
1866 		/* If no drive, skip */
1867 		if ((drvp->drive_flags & DRIVE) == 0)
1868 			continue;
1869 		/* add timing values, setup DMA if needed */
1870 		if (((drvp->drive_flags & DRIVE_DMA) == 0 &&
1871 		    (drvp->drive_flags & DRIVE_UDMA) == 0)) {
1872 			mode = drvp->PIO_mode;
1873 			goto pio;
1874 		}
1875 		if ((chp->wdc->cap & WDC_CAPABILITY_UDMA) &&
1876 		    (drvp->drive_flags & DRIVE_UDMA)) {
1877 			/* use Ultra/DMA */
1878 			drvp->drive_flags &= ~DRIVE_DMA;
1879 			udmatim_reg |= AMD756_UDMA_EN(chp->channel, drive) |
1880 			    AMD756_UDMA_EN_MTH(chp->channel, drive) |
1881 			    AMD756_UDMA_TIME(chp->channel, drive,
1882 				amd756_udma_tim[drvp->UDMA_mode]);
1883 			/* can use PIO timings, MW DMA unused */
1884 			mode = drvp->PIO_mode;
1885 		} else {
1886 			/* use Multiword DMA, but only if revision is OK */
1887 			drvp->drive_flags &= ~DRIVE_UDMA;
1888 #ifndef PCIIDE_AMD756_ENABLEDMA
1889 			/*
1890 			 * The workaround doesn't seem to be necessary
1891 			 * with all drives, so it can be disabled by
1892 			 * PCIIDE_AMD756_ENABLEDMA. It causes a hard hang if
1893 			 * triggered.
1894 			 */
1895 			if (AMD756_CHIPREV_DISABLEDMA(rev)) {
1896 				printf("%s:%d:%d: multi-word DMA disabled due "
1897 				    "to chip revision\n",
1898 				    sc->sc_wdcdev.sc_dev.dv_xname,
1899 				    chp->channel, drive);
1900 				mode = drvp->PIO_mode;
1901 				drvp->drive_flags &= ~DRIVE_DMA;
1902 				goto pio;
1903 			}
1904 #endif
1905 			/* mode = min(pio, dma+2) */
1906 			if (drvp->PIO_mode <= (drvp->DMA_mode + 2))
1907 				mode = drvp->PIO_mode;
1908 			else
1909 				mode = drvp->DMA_mode + 2;
1910 		}
1911 		idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
1912 
1913 pio:		/* setup PIO mode */
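		/* PIO modes 0-2 are collapsed to the mode 0 timings. */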
1914 		if (mode <= 2) {
1915 			drvp->DMA_mode = 0;
1916 			drvp->PIO_mode = 0;
1917 			mode = 0;
1918 		} else {
1919 			drvp->PIO_mode = mode;
1920 			drvp->DMA_mode = mode - 2;
1921 		}
1922 		datatim_reg |=
1923 		    AMD756_DATATIM_PULSE(chp->channel, drive,
1924 			amd756_pio_set[mode]) |
1925 		    AMD756_DATATIM_RECOV(chp->channel, drive,
1926 			amd756_pio_rec[mode]);
1927 	}
1928 	if (idedma_ctl != 0) {
1929 		/* Add software bits in status register */
1930 		bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
1931 		    IDEDMA_CTL + (IDEDMA_SCH_OFFSET * chp->channel),
1932 		    idedma_ctl);
1933 	}
1934 	pciide_print_modes(cp);
1935 	pci_conf_write(sc->sc_pc, sc->sc_tag, AMD756_DATATIM, datatim_reg);
1936 	pci_conf_write(sc->sc_pc, sc->sc_tag, AMD756_UDMA, udmatim_reg);
1937 }
1938 
1939 void
1940 apollo_chip_map(sc, pa)
1941 	struct pciide_softc *sc;
1942 	struct pci_attach_args *pa;
1943 {
1944 	struct pciide_channel *cp;
1945 	pcireg_t interface = PCI_INTERFACE(pa->pa_class);
1946 	int rev = PCI_REVISION(pa->pa_class);
1947 	int channel;
1948 	u_int32_t ideconf, udma_conf, old_udma_conf;
1949 	bus_size_t cmdsize, ctlsize;
1950 
1951 	if (pciide_chipen(sc, pa) == 0)
1952 		return;
1953 	printf("%s: bus-master DMA support present",
1954 	    sc->sc_wdcdev.sc_dev.dv_xname);
1955 	pciide_mapreg_dma(sc, pa);
1956 	printf("\n");
1957 	sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
1958 	    WDC_CAPABILITY_MODE;
1959 	if (sc->sc_dma_ok) {
1960 		sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_IRQACK;
1961 		sc->sc_wdcdev.irqack = pciide_irqack;
1962 		if (sc->sc_pp->ide_product == PCI_PRODUCT_VIATECH_VT82C586A_IDE
1963 		    && rev >= 6)
1964 			sc->sc_wdcdev.cap |= WDC_CAPABILITY_UDMA;
1965 	}
1966 	sc->sc_wdcdev.PIO_cap = 4;
1967 	sc->sc_wdcdev.DMA_cap = 2;
1968 	sc->sc_wdcdev.UDMA_cap = 2;
1969 	sc->sc_wdcdev.set_modes = apollo_setup_channel;
1970 	sc->sc_wdcdev.channels = sc->wdc_chanarray;
1971 	sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
1972 
1973 	old_udma_conf = pci_conf_read(sc->sc_pc, sc->sc_tag, APO_UDMA);
1974 	WDCDEBUG_PRINT(("apollo_chip_map: old APO_IDECONF=0x%x, "
1975 	    "APO_CTLMISC=0x%x, APO_DATATIM=0x%x, APO_UDMA=0x%x\n",
1976 	    pci_conf_read(sc->sc_pc, sc->sc_tag, APO_IDECONF),
1977 	    pci_conf_read(sc->sc_pc, sc->sc_tag, APO_CTLMISC),
1978 	    pci_conf_read(sc->sc_pc, sc->sc_tag, APO_DATATIM),
1979 	    old_udma_conf),
1980 	    DEBUG_PROBE);
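	/*
	 * Probe UDMA capability: set the UDMA enable and clock bits for
	 * drive 0:0, read them back to see which ones stick, then restore
	 * the original register contents below.
	 */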
1981 	pci_conf_write(sc->sc_pc, sc->sc_tag, APO_UDMA,
1982 	    old_udma_conf | (APO_UDMA_PIO_MODE(0, 0) | APO_UDMA_EN(0, 0) |
1983 	    APO_UDMA_EN_MTH(0, 0) |
1984 	    APO_UDMA_CLK66(0)));
1985 	udma_conf = pci_conf_read(sc->sc_pc, sc->sc_tag, APO_UDMA);
1986 	WDCDEBUG_PRINT(("apollo_chip_map: APO_UDMA now 0x%x\n", udma_conf),
1987 	    DEBUG_PROBE);
1988 	if ((udma_conf & (APO_UDMA_PIO_MODE(0, 0) | APO_UDMA_EN(0, 0) |
1989 	    APO_UDMA_EN_MTH(0, 0))) ==
1990 	    (APO_UDMA_PIO_MODE(0, 0) | APO_UDMA_EN(0, 0) |
1991 	    APO_UDMA_EN_MTH(0, 0))) {
1992 		if ((udma_conf & APO_UDMA_CLK66(0)) ==
1993 		    APO_UDMA_CLK66(0)) {
1994 			printf("%s: Ultra/66 capable\n",
1995 			    sc->sc_wdcdev.sc_dev.dv_xname);
1996 			sc->sc_wdcdev.UDMA_cap = 4;
1997 		} else {
1998 			printf("%s: Ultra/33 capable\n",
1999 			    sc->sc_wdcdev.sc_dev.dv_xname);
2000 			sc->sc_wdcdev.UDMA_cap = 2;
2001 		}
2002 	} else {
2003 		sc->sc_wdcdev.cap &= ~WDC_CAPABILITY_UDMA;
2004 	}
2005 	pci_conf_write(sc->sc_pc, sc->sc_tag, APO_UDMA, old_udma_conf);
2006 
2007 	for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
2008 		cp = &sc->pciide_channels[channel];
2009 		if (pciide_chansetup(sc, channel, interface) == 0)
2010 			continue;
2011 
2012 		ideconf = pci_conf_read(sc->sc_pc, sc->sc_tag, APO_IDECONF);
2013 		if ((ideconf & APO_IDECONF_EN(channel)) == 0) {
2014 			printf("%s: %s channel ignored (disabled)\n",
2015 			    sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
2016 			continue;
2017 		}
2018 		pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize,
2019 		    pciide_pci_intr);
2020 		if (cp->hw_ok == 0)
2021 			continue;
2022 		if (pciide_chan_candisable(cp)) {
2023 			ideconf &= ~APO_IDECONF_EN(channel);
2024 			pci_conf_write(sc->sc_pc, sc->sc_tag, APO_IDECONF,
2025 			    ideconf);
2026 		}
2027 		pciide_map_compat_intr(pa, cp, channel, interface);
2028 
2029 		if (cp->hw_ok == 0)
2030 			continue;
2031 		apollo_setup_channel(&sc->pciide_channels[channel].wdc_channel);
2032 	}
2033 	WDCDEBUG_PRINT(("apollo_chip_map: APO_DATATIM=0x%x, APO_UDMA=0x%x\n",
2034 	    pci_conf_read(sc->sc_pc, sc->sc_tag, APO_DATATIM),
2035 	    pci_conf_read(sc->sc_pc, sc->sc_tag, APO_UDMA)), DEBUG_PROBE);
2036 }
2037 
2038 void
2039 apollo_setup_channel(chp)
2040 	struct channel_softc *chp;
2041 {
2042 	u_int32_t udmatim_reg, datatim_reg;
2043 	u_int8_t idedma_ctl;
2044 	int mode, drive;
2045 	struct ata_drive_datas *drvp;
2046 	struct pciide_channel *cp = (struct pciide_channel*)chp;
2047 	struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
2048 
2049 	idedma_ctl = 0;
2050 	datatim_reg = pci_conf_read(sc->sc_pc, sc->sc_tag, APO_DATATIM);
2051 	udmatim_reg = pci_conf_read(sc->sc_pc, sc->sc_tag, APO_UDMA);
2052 	datatim_reg &= ~APO_DATATIM_MASK(chp->channel);
2053 	udmatim_reg &= ~APO_UDMA_MASK(chp->channel);
2054 
2055 	/* setup DMA if needed */
2056 	pciide_channel_dma_setup(cp);
2057 
2058 	/*
2059 	 * We can't mix Ultra/33 and Ultra/66 on the same channel, so
2060 	 * downgrade to Ultra/33 if needed
2061 	 */
2062 	if ((chp->ch_drive[0].drive_flags & DRIVE_UDMA) &&
2063 	    (chp->ch_drive[1].drive_flags & DRIVE_UDMA)) {
2064 		/* both drives UDMA */
2065 		if (chp->ch_drive[0].UDMA_mode > 2 &&
2066 		    chp->ch_drive[1].UDMA_mode <= 2) {
2067 			/* drive 0 Ultra/66, drive 1 Ultra/33 */
2068 			chp->ch_drive[0].UDMA_mode = 2;
2069 		} else if (chp->ch_drive[1].UDMA_mode > 2 &&
2070 		    chp->ch_drive[0].UDMA_mode <= 2) {
2071 			/* drive 1 Ultra/66, drive 0 Ultra/33 */
2072 			chp->ch_drive[1].UDMA_mode = 2;
2073 		}
2074 	}
2075 
2076 	for (drive = 0; drive < 2; drive++) {
2077 		drvp = &chp->ch_drive[drive];
2078 		/* If no drive, skip */
2079 		if ((drvp->drive_flags & DRIVE) == 0)
2080 			continue;
2081 		/* add timing values, setup DMA if needed */
2082 		if (((drvp->drive_flags & DRIVE_DMA) == 0 &&
2083 		    (drvp->drive_flags & DRIVE_UDMA) == 0)) {
2084 			mode = drvp->PIO_mode;
2085 			goto pio;
2086 		}
2087 		if ((chp->wdc->cap & WDC_CAPABILITY_UDMA) &&
2088 		    (drvp->drive_flags & DRIVE_UDMA)) {
2089 			/* use Ultra/DMA */
2090 			drvp->drive_flags &= ~DRIVE_DMA;
2091 			udmatim_reg |= APO_UDMA_EN(chp->channel, drive) |
2092 			    APO_UDMA_EN_MTH(chp->channel, drive) |
2093 			    APO_UDMA_TIME(chp->channel, drive,
2094 				apollo_udma_tim[drvp->UDMA_mode]);
2095 			if (drvp->UDMA_mode > 2)
2096 				udmatim_reg |=
2097 				    APO_UDMA_CLK66(chp->channel);
2098 			/* can use PIO timings, MW DMA unused */
2099 			mode = drvp->PIO_mode;
2100 		} else {
2101 			/* use Multiword DMA */
2102 			drvp->drive_flags &= ~DRIVE_UDMA;
2103 			/* mode = min(pio, dma+2) */
2104 			if (drvp->PIO_mode <= (drvp->DMA_mode + 2))
2105 				mode = drvp->PIO_mode;
2106 			else
2107 				mode = drvp->DMA_mode + 2;
2108 		}
2109 		idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
2110 
2111 pio:		/* setup PIO mode */
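		/* PIO modes 0-2 are collapsed to the mode 0 timings. */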
2112 		if (mode <= 2) {
2113 			drvp->DMA_mode = 0;
2114 			drvp->PIO_mode = 0;
2115 			mode = 0;
2116 		} else {
2117 			drvp->PIO_mode = mode;
2118 			drvp->DMA_mode = mode - 2;
2119 		}
2120 		datatim_reg |=
2121 		    APO_DATATIM_PULSE(chp->channel, drive,
2122 			apollo_pio_set[mode]) |
2123 		    APO_DATATIM_RECOV(chp->channel, drive,
2124 			apollo_pio_rec[mode]);
2125 	}
2126 	if (idedma_ctl != 0) {
2127 		/* Add software bits in status register */
2128 		bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
2129 		    IDEDMA_CTL + (IDEDMA_SCH_OFFSET * chp->channel),
2130 		    idedma_ctl);
2131 	}
2132 	pciide_print_modes(cp);
2133 	pci_conf_write(sc->sc_pc, sc->sc_tag, APO_DATATIM, datatim_reg);
2134 	pci_conf_write(sc->sc_pc, sc->sc_tag, APO_UDMA, udmatim_reg);
2135 }
2136 
2137 void
2138 cmd_channel_map(pa, sc, channel)
2139 	struct pci_attach_args *pa;
2140 	struct pciide_softc *sc;
2141 	int channel;
2142 {
2143 	struct pciide_channel *cp = &sc->pciide_channels[channel];
2144 	bus_size_t cmdsize, ctlsize;
2145 	u_int8_t ctrl = pciide_pci_read(sc->sc_pc, sc->sc_tag, CMD_CTRL);
2146 	int interface;
2147 
2148 	/*
2149 	 * The 0648/0649 can be told to identify as a RAID controller.
2150 	 * In this case, we have to fake the interface.
2151 	 */
2152 	if (PCI_SUBCLASS(pa->pa_class) != PCI_SUBCLASS_MASS_STORAGE_IDE) {
2153 		interface = PCIIDE_INTERFACE_SETTABLE(0) |
2154 		    PCIIDE_INTERFACE_SETTABLE(1);
2155 		if (pciide_pci_read(pa->pa_pc, pa->pa_tag, CMD_CONF) &
2156 		    CMD_CONF_DSA1)
2157 			interface |= PCIIDE_INTERFACE_PCI(0) |
2158 			    PCIIDE_INTERFACE_PCI(1);
2159 	} else {
2160 		interface = PCI_INTERFACE(pa->pa_class);
2161 	}
2162 
2163 	sc->wdc_chanarray[channel] = &cp->wdc_channel;
2164 	cp->name = PCIIDE_CHANNEL_NAME(channel);
2165 	cp->wdc_channel.channel = channel;
2166 	cp->wdc_channel.wdc = &sc->sc_wdcdev;
2167 
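	/*
	 * Both channels share one command queue: channel 0 allocates it
	 * and the second channel reuses it.
	 */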
2168 	if (channel > 0) {
2169 		cp->wdc_channel.ch_queue =
2170 		    sc->pciide_channels[0].wdc_channel.ch_queue;
2171 	} else {
2172 		cp->wdc_channel.ch_queue =
2173 		    malloc(sizeof(struct channel_queue), M_DEVBUF, M_NOWAIT);
2174 	}
2175 	if (cp->wdc_channel.ch_queue == NULL) {
2176 		printf("%s %s channel: "
2177 		    "can't allocate memory for command queue\n",
2178 		    sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
2179 		return;
2180 	}
2181 
2182 	printf("%s: %s channel %s to %s mode\n",
2183 	    sc->sc_wdcdev.sc_dev.dv_xname, cp->name,
2184 	    (interface & PCIIDE_INTERFACE_SETTABLE(channel)) ?
2185 	    "configured" : "wired",
2186 	    (interface & PCIIDE_INTERFACE_PCI(channel)) ?
2187 	    "native-PCI" : "compatibility");
2188 
2189 	/*
2190 	 * with a CMD PCI64x, if we get here, the first channel is enabled:
2191 	 * there's no way to disable the first channel without disabling
2192 	 * the whole device
2193 	 */
2194 	if (channel != 0 && (ctrl & CMD_CTRL_2PORT) == 0) {
2195 		printf("%s: %s channel ignored (disabled)\n",
2196 		    sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
2197 		return;
2198 	}
2199 
2200 	pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize, cmd_pci_intr);
2201 	if (cp->hw_ok == 0)
2202 		return;
2203 	if (channel == 1) {
2204 		if (pciide_chan_candisable(cp)) {
2205 			ctrl &= ~CMD_CTRL_2PORT;
2206 			pciide_pci_write(pa->pa_pc, pa->pa_tag,
2207 			    CMD_CTRL, ctrl);
2208 		}
2209 	}
2210 	pciide_map_compat_intr(pa, cp, channel, interface);
2211 }
2212 
2213 int
2214 cmd_pci_intr(arg)
2215 	void *arg;
2216 {
2217 	struct pciide_softc *sc = arg;
2218 	struct pciide_channel *cp;
2219 	struct channel_softc *wdc_cp;
2220 	int i, rv, crv;
2221 	u_int32_t priirq, secirq;
2222 
2223 	rv = 0;
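	/* Primary intr status is in CMD_CONF, secondary in CMD_ARTTIM23. */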
2224 	priirq = pciide_pci_read(sc->sc_pc, sc->sc_tag, CMD_CONF);
2225 	secirq = pciide_pci_read(sc->sc_pc, sc->sc_tag, CMD_ARTTIM23);
2226 	for (i = 0; i < sc->sc_wdcdev.nchannels; i++) {
2227 		cp = &sc->pciide_channels[i];
2228 		wdc_cp = &cp->wdc_channel;
2229 		/* If a compat channel, skip. */
2230 		if (cp->compat)
2231 			continue;
2232 		if ((i == 0 && (priirq & CMD_CONF_DRV0_INTR)) ||
2233 		    (i == 1 && (secirq & CMD_ARTTIM23_IRQ))) {
2234 			crv = wdcintr(wdc_cp);
2235 			if (crv == 0)
2236 				printf("%s:%d: bogus intr\n",
2237 				    sc->sc_wdcdev.sc_dev.dv_xname, i);
2238 			else
2239 				rv = 1;
2240 		}
2241 	}
2242 	return rv;
2243 }
2244 
2245 void
2246 cmd_chip_map(sc, pa)
2247 	struct pciide_softc *sc;
2248 	struct pci_attach_args *pa;
2249 {
2250 	int channel;
2251 
2252 	/*
2253 	 * For a CMD PCI064x, the use of PCI_COMMAND_IO_ENABLE
2254 	 * and the base address registers can be disabled at the
2255 	 * hardware level. In this case, the device is wired
2256 	 * in compat mode and its first channel is always enabled,
2257 	 * but we can't rely on PCI_COMMAND_IO_ENABLE.
2258 	 * In fact, it seems that the first channel of the CMD PCI0640
2259 	 * can't be disabled.
2260 	 */
2261 
2262 #ifdef PCIIDE_CMD064x_DISABLE
2263 	if (pciide_chipen(sc, pa) == 0)
2264 		return;
2265 #endif
2266 
2267 	printf("%s: hardware does not support DMA\n",
2268 	    sc->sc_wdcdev.sc_dev.dv_xname);
2269 	sc->sc_dma_ok = 0;
2270 
2271 	sc->sc_wdcdev.channels = sc->wdc_chanarray;
2272 	sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
2273 	sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16;
2274 
2275 	for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
2276 		cmd_channel_map(pa, sc, channel);
2277 	}
2278 }
2279 
2280 void
2281 cmd0643_9_chip_map(sc, pa)
2282 	struct pciide_softc *sc;
2283 	struct pci_attach_args *pa;
2284 {
2285 	struct pciide_channel *cp;
2286 	int channel;
2287 	int rev = PCI_REVISION(
2288 	    pci_conf_read(sc->sc_pc, sc->sc_tag, PCI_CLASS_REG));
2289 
2290 	/*
2291 	 * For a CMD PCI064x, the use of PCI_COMMAND_IO_ENABLE
2292 	 * and the base address registers can be disabled at the
2293 	 * hardware level. In this case, the device is wired
2294 	 * in compat mode and its first channel is always enabled,
2295 	 * but we can't rely on PCI_COMMAND_IO_ENABLE.
2296 	 * In fact, it seems that the first channel of the CMD PCI0640
2297 	 * can't be disabled.
2298 	 */
2299 
2300 #ifdef PCIIDE_CMD064x_DISABLE
2301 	if (pciide_chipen(sc, pa) == 0)
2302 		return;
2303 #endif
2304 	printf("%s: bus-master DMA support present",
2305 	    sc->sc_wdcdev.sc_dev.dv_xname);
2306 	pciide_mapreg_dma(sc, pa);
2307 	printf("\n");
2308 	sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
2309 	    WDC_CAPABILITY_MODE;
2310 	if (sc->sc_dma_ok) {
2311 		sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_IRQACK;
2312 		switch (sc->sc_pp->ide_product) {
2313 		case PCI_PRODUCT_CMDTECH_649:
2314 		case PCI_PRODUCT_CMDTECH_648:
2315 			sc->sc_wdcdev.cap |= WDC_CAPABILITY_UDMA;
2316 			sc->sc_wdcdev.UDMA_cap = 4;
2317 			sc->sc_wdcdev.irqack = cmd646_9_irqack;
2318 			break;
2319 		case PCI_PRODUCT_CMDTECH_646:
2320 			if (rev >= CMD0646U2_REV) {
2321 				sc->sc_wdcdev.cap |= WDC_CAPABILITY_UDMA;
2322 				sc->sc_wdcdev.UDMA_cap = 2;
2323 			} else if (rev >= CMD0646U_REV) {
2324 				/*
2325 				 * Linux's driver claims that the 646U is broken
2326 				 * with UDMA. Only enable it if we know what
2327 				 * we're doing.
2328 				 */
2329 #ifdef PCIIDE_CMD0646U_ENABLEUDMA
2330 				sc->sc_wdcdev.cap |= WDC_CAPABILITY_UDMA;
2331 				sc->sc_wdcdev.UDMA_cap = 2;
2332 #endif
2333 				/* explicitly disable UDMA */
2334 				pciide_pci_write(sc->sc_pc, sc->sc_tag,
2335 				    CMD_UDMATIM(0), 0);
2336 				pciide_pci_write(sc->sc_pc, sc->sc_tag,
2337 				    CMD_UDMATIM(1), 0);
2338 			}
2339 			sc->sc_wdcdev.irqack = cmd646_9_irqack;
2340 			break;
2341 		default:
2342 			sc->sc_wdcdev.irqack = pciide_irqack;
2343 		}
2344 	}
2345 
2346 	sc->sc_wdcdev.channels = sc->wdc_chanarray;
2347 	sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
2348 	sc->sc_wdcdev.PIO_cap = 4;
2349 	sc->sc_wdcdev.DMA_cap = 2;
2350 	sc->sc_wdcdev.set_modes = cmd0643_9_setup_channel;
2351 
2352 	WDCDEBUG_PRINT(("cmd0643_9_chip_map: old timings reg 0x%x 0x%x\n",
2353 		pci_conf_read(sc->sc_pc, sc->sc_tag, 0x54),
2354 		pci_conf_read(sc->sc_pc, sc->sc_tag, 0x58)),
2355 		DEBUG_PROBE);
2356 
2357 	for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
2358 		cp = &sc->pciide_channels[channel];
2359 		cmd_channel_map(pa, sc, channel);
2360 		if (cp->hw_ok == 0)
2361 			continue;
2362 		cmd0643_9_setup_channel(&cp->wdc_channel);
2363 	}
2364 	/*
2365 	 * note - this also makes sure we clear the irq disable and reset
2366 	 * bits
2367 	 */
2368 	pciide_pci_write(sc->sc_pc, sc->sc_tag, CMD_DMA_MODE, CMD_DMA_MULTIPLE);
2369 	WDCDEBUG_PRINT(("cmd0643_9_chip_map: timings reg now 0x%x 0x%x\n",
2370 	    pci_conf_read(sc->sc_pc, sc->sc_tag, 0x54),
2371 	    pci_conf_read(sc->sc_pc, sc->sc_tag, 0x58)),
2372 	    DEBUG_PROBE);
2373 }
2374 
2375 void
2376 cmd0643_9_setup_channel(chp)
2377 	struct channel_softc *chp;
2378 {
2379 	struct ata_drive_datas *drvp;
2380 	u_int8_t tim;
2381 	u_int32_t idedma_ctl, udma_reg;
2382 	int drive;
2383 	struct pciide_channel *cp = (struct pciide_channel*)chp;
2384 	struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
2385 
2386 	idedma_ctl = 0;
2387 	/* setup DMA if needed */
2388 	pciide_channel_dma_setup(cp);
2389 
2390 	for (drive = 0; drive < 2; drive++) {
2391 		drvp = &chp->ch_drive[drive];
2392 		/* If no drive, skip */
2393 		if ((drvp->drive_flags & DRIVE) == 0)
2394 			continue;
2395 		/* add timing values, setup DMA if needed */
2396 		tim = cmd0643_9_data_tim_pio[drvp->PIO_mode];
2397 		if (drvp->drive_flags & (DRIVE_DMA | DRIVE_UDMA)) {
2398 			if (drvp->drive_flags & DRIVE_UDMA) {
2399 				/* UltraDMA on a 646U2, 0648 or 0649 */
2400 				drvp->drive_flags &= ~DRIVE_DMA;
2401 				udma_reg = pciide_pci_read(sc->sc_pc,
2402 				    sc->sc_tag, CMD_UDMATIM(chp->channel));
2403 				if (drvp->UDMA_mode > 2 &&
2404 				    (pciide_pci_read(sc->sc_pc, sc->sc_tag,
2405 				    CMD_BICSR) &
2406 				    CMD_BICSR_80(chp->channel)) == 0)
2407 					drvp->UDMA_mode = 2;
2408 				if (drvp->UDMA_mode > 2)
2409 					udma_reg &= ~CMD_UDMATIM_UDMA33(drive);
2410 				else if (sc->sc_wdcdev.UDMA_cap > 2)
2411 					udma_reg |= CMD_UDMATIM_UDMA33(drive);
2412 				udma_reg |= CMD_UDMATIM_UDMA(drive);
2413 				udma_reg &= ~(CMD_UDMATIM_TIM_MASK <<
2414 				    CMD_UDMATIM_TIM_OFF(drive));
2415 				udma_reg |=
2416 				    (cmd0646_9_tim_udma[drvp->UDMA_mode] <<
2417 				    CMD_UDMATIM_TIM_OFF(drive));
2418 				pciide_pci_write(sc->sc_pc, sc->sc_tag,
2419 				    CMD_UDMATIM(chp->channel), udma_reg);
2420 			} else {
2421 				/*
2422 				 * use Multiword DMA.
2423 				 * Timings will be used for both PIO and DMA,
2424 				 * so adjust DMA mode if needed
2425 				 * if we have a 0646U2/8/9, turn off UDMA
2426 				 */
2427 				if (sc->sc_wdcdev.cap & WDC_CAPABILITY_UDMA) {
2428 					udma_reg = pciide_pci_read(sc->sc_pc,
2429 					    sc->sc_tag,
2430 					    CMD_UDMATIM(chp->channel));
2431 					udma_reg &= ~CMD_UDMATIM_UDMA(drive);
2432 					pciide_pci_write(sc->sc_pc, sc->sc_tag,
2433 					    CMD_UDMATIM(chp->channel),
2434 					    udma_reg);
2435 				}
2436 				if (drvp->PIO_mode >= 3 &&
2437 				    (drvp->DMA_mode + 2) > drvp->PIO_mode) {
2438 					drvp->DMA_mode = drvp->PIO_mode - 2;
2439 				}
2440 				tim = cmd0643_9_data_tim_dma[drvp->DMA_mode];
2441 			}
2442 			idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
2443 		}
2444 		pciide_pci_write(sc->sc_pc, sc->sc_tag,
2445 		    CMD_DATA_TIM(chp->channel, drive), tim);
2446 	}
2447 	if (idedma_ctl != 0) {
2448 		/* Add software bits in status register */
2449 		bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
2450 		    IDEDMA_CTL + (IDEDMA_SCH_OFFSET * chp->channel),
2451 		    idedma_ctl);
2452 	}
2453 	pciide_print_modes(cp);
2454 }
2455 
2456 void
2457 cmd646_9_irqack(chp)
2458 	struct channel_softc *chp;
2459 {
2460 	u_int32_t priirq, secirq;
2461 	struct pciide_channel *cp = (struct pciide_channel*)chp;
2462 	struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
2463 
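	/* Write the status register back to itself to ack the interrupt. */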
2464 	if (chp->channel == 0) {
2465 		priirq = pciide_pci_read(sc->sc_pc, sc->sc_tag, CMD_CONF);
2466 		pciide_pci_write(sc->sc_pc, sc->sc_tag, CMD_CONF, priirq);
2467 	} else {
2468 		secirq = pciide_pci_read(sc->sc_pc, sc->sc_tag, CMD_ARTTIM23);
2469 		pciide_pci_write(sc->sc_pc, sc->sc_tag, CMD_ARTTIM23, secirq);
2470 	}
2471 	pciide_irqack(chp);
2472 }
2473 
2474 void
2475 cy693_chip_map(sc, pa)
2476 	struct pciide_softc *sc;
2477 	struct pci_attach_args *pa;
2478 {
2479 	struct pciide_channel *cp;
2480 	pcireg_t interface = PCI_INTERFACE(pa->pa_class);
2481 	bus_size_t cmdsize, ctlsize;
2482 
2483 	if (pciide_chipen(sc, pa) == 0)
2484 		return;
2485 	/*
2486 	 * this chip has 2 PCI IDE functions, one for primary and one for
2487 	 * secondary. So we need to call pciide_mapregs_compat() with
2488 	 * the real channel
2489 	 */
2490 	if (pa->pa_function == 1) {
2491 		sc->sc_cy_compatchan = 0;
2492 	} else if (pa->pa_function == 2) {
2493 		sc->sc_cy_compatchan = 1;
2494 	} else {
2495 		printf("%s: unexpected PCI function %d\n",
2496 		    sc->sc_wdcdev.sc_dev.dv_xname, pa->pa_function);
2497 		return;
2498 	}
2499 	if (interface & PCIIDE_INTERFACE_BUS_MASTER_DMA) {
2500 		printf("%s: bus-master DMA support present",
2501 		    sc->sc_wdcdev.sc_dev.dv_xname);
2502 		pciide_mapreg_dma(sc, pa);
2503 	} else {
2504 		printf("%s: hardware does not support DMA",
2505 		    sc->sc_wdcdev.sc_dev.dv_xname);
2506 		sc->sc_dma_ok = 0;
2507 	}
2508 	printf("\n");
2509 
2510 	sc->sc_cy_handle = cy82c693_init(pa->pa_iot);
2511 	if (sc->sc_cy_handle == NULL) {
2512 		printf("%s: unable to map hyperCache control registers\n",
2513 		    sc->sc_wdcdev.sc_dev.dv_xname);
2514 		sc->sc_dma_ok = 0;
2515 	}
2516 
2517 	sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
2518 	    WDC_CAPABILITY_MODE;
2519 	if (sc->sc_dma_ok) {
2520 		sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_IRQACK;
2521 		sc->sc_wdcdev.irqack = pciide_irqack;
2522 	}
2523 	sc->sc_wdcdev.PIO_cap = 4;
2524 	sc->sc_wdcdev.DMA_cap = 2;
2525 	sc->sc_wdcdev.set_modes = cy693_setup_channel;
2526 
2527 	sc->sc_wdcdev.channels = sc->wdc_chanarray;
2528 	sc->sc_wdcdev.nchannels = 1;
2529 
2530 	/* Only one channel for this chip; if we are here it's enabled */
2531 	cp = &sc->pciide_channels[0];
2532 	sc->wdc_chanarray[0] = &cp->wdc_channel;
2533 	cp->name = PCIIDE_CHANNEL_NAME(0);
2534 	cp->wdc_channel.channel = 0;
2535 	cp->wdc_channel.wdc = &sc->sc_wdcdev;
2536 	cp->wdc_channel.ch_queue =
2537 	    malloc(sizeof(struct channel_queue), M_DEVBUF, M_NOWAIT);
2538 	if (cp->wdc_channel.ch_queue == NULL) {
2539 		printf("%s primary channel: "
2540 		    "can't allocate memory for command queue\n",
2541 		    sc->sc_wdcdev.sc_dev.dv_xname);
2542 		return;
2543 	}
2544 	printf("%s: primary channel %s to ",
2545 	    sc->sc_wdcdev.sc_dev.dv_xname,
2546 	    (interface & PCIIDE_INTERFACE_SETTABLE(0)) ?
2547 	    "configured" : "wired");
2548 	if (interface & PCIIDE_INTERFACE_PCI(0)) {
2549 		printf("native-PCI");
2550 		cp->hw_ok = pciide_mapregs_native(pa, cp, &cmdsize, &ctlsize,
2551 		    pciide_pci_intr);
2552 	} else {
2553 		printf("compatibility");
2554 		cp->hw_ok = pciide_mapregs_compat(pa, cp, sc->sc_cy_compatchan,
2555 		    &cmdsize, &ctlsize);
2556 	}
2557 	printf(" mode\n");
2558 	cp->wdc_channel.data32iot = cp->wdc_channel.cmd_iot;
2559 	cp->wdc_channel.data32ioh = cp->wdc_channel.cmd_ioh;
2560 	wdcattach(&cp->wdc_channel);
2561 	if (pciide_chan_candisable(cp)) {
2562 		pci_conf_write(sc->sc_pc, sc->sc_tag,
2563 		    PCI_COMMAND_STATUS_REG, 0);
2564 	}
2565 	pciide_map_compat_intr(pa, cp, sc->sc_cy_compatchan, interface);
2566 	if (cp->hw_ok == 0)
2567 		return;
2568 	WDCDEBUG_PRINT(("cy693_chip_map: old timings reg 0x%x\n",
2569 	    pci_conf_read(sc->sc_pc, sc->sc_tag, CY_CMD_CTRL)),DEBUG_PROBE);
2570 	cy693_setup_channel(&cp->wdc_channel);
2571 	WDCDEBUG_PRINT(("cy693_chip_map: new timings reg 0x%x\n",
2572 	    pci_conf_read(sc->sc_pc, sc->sc_tag, CY_CMD_CTRL)), DEBUG_PROBE);
2573 }
2574 
2575 void
2576 cy693_setup_channel(chp)
2577 	struct channel_softc *chp;
2578 {
2579 	struct ata_drive_datas *drvp;
2580 	int drive;
2581 	u_int32_t cy_cmd_ctrl;
2582 	u_int32_t idedma_ctl;
2583 	struct pciide_channel *cp = (struct pciide_channel*)chp;
2584 	struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
2585 	int dma_mode = -1;
2586 
2587 	cy_cmd_ctrl = idedma_ctl = 0;
2588 
2589 	/* setup DMA if needed */
2590 	pciide_channel_dma_setup(cp);
2591 
2592 	for (drive = 0; drive < 2; drive++) {
2593 		drvp = &chp->ch_drive[drive];
2594 		/* If no drive, skip */
2595 		if ((drvp->drive_flags & DRIVE) == 0)
2596 			continue;
2597 		/* add timing values, setup DMA if needed */
2598 		if (drvp->drive_flags & DRIVE_DMA) {
2599 			idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
2600 			/* use Multiword DMA */
2601 			if (dma_mode == -1 || dma_mode > drvp->DMA_mode)
2602 				dma_mode = drvp->DMA_mode;
2603 		}
2604 		cy_cmd_ctrl |= (cy_pio_pulse[drvp->PIO_mode] <<
2605 		    CY_CMD_CTRL_IOW_PULSE_OFF(drive));
2606 		cy_cmd_ctrl |= (cy_pio_rec[drvp->PIO_mode] <<
2607 		    CY_CMD_CTRL_IOW_REC_OFF(drive));
2608 		cy_cmd_ctrl |= (cy_pio_pulse[drvp->PIO_mode] <<
2609 		    CY_CMD_CTRL_IOR_PULSE_OFF(drive));
2610 		cy_cmd_ctrl |= (cy_pio_rec[drvp->PIO_mode] <<
2611 		    CY_CMD_CTRL_IOR_REC_OFF(drive));
2612 	}
2613 	pci_conf_write(sc->sc_pc, sc->sc_tag, CY_CMD_CTRL, cy_cmd_ctrl);
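	/* Only one DMA timing per channel, shared by both drives. */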
2614 	chp->ch_drive[0].DMA_mode = dma_mode;
2615 	chp->ch_drive[1].DMA_mode = dma_mode;
2616 
2617 	if (dma_mode == -1)
2618 		dma_mode = 0;
2619 
2620 	if (sc->sc_cy_handle != NULL) {
2621 		/* Note: `multiple' is implied. */
2622 		cy82c693_write(sc->sc_cy_handle,
2623 		    (sc->sc_cy_compatchan == 0) ?
2624 		    CY_DMA_IDX_PRIMARY : CY_DMA_IDX_SECONDARY, dma_mode);
2625 	}
2626 
2627 	pciide_print_modes(cp);
2628 
2629 	if (idedma_ctl != 0) {
2630 		/* Add software bits in status register */
2631 		bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
2632 		    IDEDMA_CTL, idedma_ctl);
2633 	}
2634 }
2635 
2636 void
2637 sis_chip_map(sc, pa)
2638 	struct pciide_softc *sc;
2639 	struct pci_attach_args *pa;
2640 {
2641 	struct pciide_channel *cp;
2642 	int channel;
2643 	u_int8_t sis_ctr0 = pciide_pci_read(sc->sc_pc, sc->sc_tag, SIS_CTRL0);
2644 	pcireg_t interface = PCI_INTERFACE(pa->pa_class);
2645 	pcireg_t rev = PCI_REVISION(pa->pa_class);
2646 	bus_size_t cmdsize, ctlsize;
2647 
2648 	if (pciide_chipen(sc, pa) == 0)
2649 		return;
2650 	printf("%s: bus-master DMA support present",
2651 	    sc->sc_wdcdev.sc_dev.dv_xname);
2652 	pciide_mapreg_dma(sc, pa);
2653 	printf("\n");
2654 	sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
2655 	    WDC_CAPABILITY_MODE;
2656 	if (sc->sc_dma_ok) {
2657 		sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_IRQACK;
2658 		sc->sc_wdcdev.irqack = pciide_irqack;
2659 		if (rev > 0xd0)
2660 			sc->sc_wdcdev.cap |= WDC_CAPABILITY_UDMA;
2661 	}
2662 
2663 	sc->sc_wdcdev.PIO_cap = 4;
2664 	sc->sc_wdcdev.DMA_cap = 2;
2665 	if (sc->sc_wdcdev.cap & WDC_CAPABILITY_UDMA)
2666 		sc->sc_wdcdev.UDMA_cap = 2;
2667 	sc->sc_wdcdev.set_modes = sis_setup_channel;
2668 
2669 	sc->sc_wdcdev.channels = sc->wdc_chanarray;
2670 	sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
2671 
2672 	pciide_pci_write(sc->sc_pc, sc->sc_tag, SIS_MISC,
2673 	    pciide_pci_read(sc->sc_pc, sc->sc_tag, SIS_MISC) |
2674 	    SIS_MISC_TIM_SEL | SIS_MISC_FIFO_SIZE);
2675 
2676 	for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
2677 		cp = &sc->pciide_channels[channel];
2678 		if (pciide_chansetup(sc, channel, interface) == 0)
2679 			continue;
2680 		if ((channel == 0 && (sis_ctr0 & SIS_CTRL0_CHAN0_EN) == 0) ||
2681 		    (channel == 1 && (sis_ctr0 & SIS_CTRL0_CHAN1_EN) == 0)) {
2682 			printf("%s: %s channel ignored (disabled)\n",
2683 			    sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
2684 			continue;
2685 		}
2686 		pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize,
2687 		    pciide_pci_intr);
2688 		if (cp->hw_ok == 0)
2689 			continue;
2690 		if (pciide_chan_candisable(cp)) {
2691 			if (channel == 0)
2692 				sis_ctr0 &= ~SIS_CTRL0_CHAN0_EN;
2693 			else
2694 				sis_ctr0 &= ~SIS_CTRL0_CHAN1_EN;
2695 			pciide_pci_write(sc->sc_pc, sc->sc_tag, SIS_CTRL0,
2696 			    sis_ctr0);
2697 		}
2698 		pciide_map_compat_intr(pa, cp, channel, interface);
2699 		if (cp->hw_ok == 0)
2700 			continue;
2701 		sis_setup_channel(&cp->wdc_channel);
2702 	}
2703 }
2704 
2705 void
2706 sis_setup_channel(chp)
2707 	struct channel_softc *chp;
2708 {
2709 	struct ata_drive_datas *drvp;
2710 	int drive;
2711 	u_int32_t sis_tim;
2712 	u_int32_t idedma_ctl;
2713 	struct pciide_channel *cp = (struct pciide_channel*)chp;
2714 	struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
2715 
2716 	WDCDEBUG_PRINT(("sis_setup_channel: old timings reg for "
2717 	    "channel %d 0x%x\n", chp->channel,
2718 	    pci_conf_read(sc->sc_pc, sc->sc_tag, SIS_TIM(chp->channel))),
2719 	    DEBUG_PROBE);
2720 	sis_tim = 0;
2721 	idedma_ctl = 0;
2722 	/* setup DMA if needed */
2723 	pciide_channel_dma_setup(cp);
2724 
2725 	for (drive = 0; drive < 2; drive++) {
2726 		drvp = &chp->ch_drive[drive];
2727 		/* If no drive, skip */
2728 		if ((drvp->drive_flags & DRIVE) == 0)
2729 			continue;
2730 		/* add timing values, setup DMA if needed */
2731 		if ((drvp->drive_flags & DRIVE_DMA) == 0 &&
2732 		    (drvp->drive_flags & DRIVE_UDMA) == 0)
2733 			goto pio;
2734 
2735 		if (drvp->drive_flags & DRIVE_UDMA) {
2736 			/* use Ultra/DMA */
2737 			drvp->drive_flags &= ~DRIVE_DMA;
2738 			sis_tim |= sis_udma_tim[drvp->UDMA_mode] <<
2739 			    SIS_TIM_UDMA_TIME_OFF(drive);
2740 			sis_tim |= SIS_TIM_UDMA_EN(drive);
2741 		} else {
2742 			/*
2743 			 * use Multiword DMA
2744 			 * Timings will be used for both PIO and DMA,
2745 			 * so adjust DMA mode if needed
2746 			 */
2747 			if (drvp->PIO_mode > (drvp->DMA_mode + 2))
2748 				drvp->PIO_mode = drvp->DMA_mode + 2;
2749 			if (drvp->DMA_mode + 2 > (drvp->PIO_mode))
2750 				drvp->DMA_mode = (drvp->PIO_mode > 2) ?
2751 				    drvp->PIO_mode - 2 : 0;
2752 			if (drvp->DMA_mode == 0)
2753 				drvp->PIO_mode = 0;
2754 		}
2755 		idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
2756 pio:		sis_tim |= sis_pio_act[drvp->PIO_mode] <<
2757 		    SIS_TIM_ACT_OFF(drive);
2758 		sis_tim |= sis_pio_rec[drvp->PIO_mode] <<
2759 		    SIS_TIM_REC_OFF(drive);
2760 	}
2761 	WDCDEBUG_PRINT(("sis_setup_channel: new timings reg for "
2762 	    "channel %d 0x%x\n", chp->channel, sis_tim), DEBUG_PROBE);
2763 	pci_conf_write(sc->sc_pc, sc->sc_tag, SIS_TIM(chp->channel), sis_tim);
2764 	if (idedma_ctl != 0) {
2765 		/* Add software bits in status register */
2766 		bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
2767 		    IDEDMA_CTL, idedma_ctl);
2768 	}
2769 	pciide_print_modes(cp);
2770 }
2771 
2772 void
2773 acer_chip_map(sc, pa)
2774 	struct pciide_softc *sc;
2775 	struct pci_attach_args *pa;
2776 {
2777 	struct pciide_channel *cp;
2778 	int channel;
2779 	pcireg_t cr, interface;
2780 	bus_size_t cmdsize, ctlsize;
2781 	pcireg_t rev = PCI_REVISION(pa->pa_class);
2782 
2783 	if (pciide_chipen(sc, pa) == 0)
2784 		return;
2785 	printf("%s: bus-master DMA support present",
2786 	    sc->sc_wdcdev.sc_dev.dv_xname);
2787 	pciide_mapreg_dma(sc, pa);
2788 	printf("\n");
2789 	sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
2790 	    WDC_CAPABILITY_MODE;
2791 	if (sc->sc_dma_ok) {
2792 		sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA;
2793 		if (rev >= 0x20)
2794 			sc->sc_wdcdev.cap |= WDC_CAPABILITY_UDMA;
2795 		sc->sc_wdcdev.cap |= WDC_CAPABILITY_IRQACK;
2796 		sc->sc_wdcdev.irqack = pciide_irqack;
2797 	}
2798 
2799 	sc->sc_wdcdev.PIO_cap = 4;
2800 	sc->sc_wdcdev.DMA_cap = 2;
2801 	sc->sc_wdcdev.UDMA_cap = 2;
2802 	sc->sc_wdcdev.set_modes = acer_setup_channel;
2803 	sc->sc_wdcdev.channels = sc->wdc_chanarray;
2804 	sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
2805 
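	/* Set the DMA enable bit and clear the FIFO disable bit. */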
2806 	pciide_pci_write(sc->sc_pc, sc->sc_tag, ACER_CDRC,
2807 	    (pciide_pci_read(sc->sc_pc, sc->sc_tag, ACER_CDRC) |
2808 		ACER_CDRC_DMA_EN) & ~ACER_CDRC_FIFO_DISABLE);
2809 
2810 	/* Enable "microsoft register bits" R/W. */
2811 	pciide_pci_write(sc->sc_pc, sc->sc_tag, ACER_CCAR3,
2812 	    pciide_pci_read(sc->sc_pc, sc->sc_tag, ACER_CCAR3) | ACER_CCAR3_PI);
2813 	pciide_pci_write(sc->sc_pc, sc->sc_tag, ACER_CCAR1,
2814 	    pciide_pci_read(sc->sc_pc, sc->sc_tag, ACER_CCAR1) &
2815 	    ~(ACER_CHANSTATUS_RO|PCIIDE_CHAN_RO(0)|PCIIDE_CHAN_RO(1)));
2816 	pciide_pci_write(sc->sc_pc, sc->sc_tag, ACER_CCAR2,
2817 	    pciide_pci_read(sc->sc_pc, sc->sc_tag, ACER_CCAR2) &
2818 	    ~ACER_CHANSTATUSREGS_RO);
2819 	cr = pci_conf_read(sc->sc_pc, sc->sc_tag, PCI_CLASS_REG);
2820 	cr |= (PCIIDE_CHANSTATUS_EN << PCI_INTERFACE_SHIFT);
2821 	pci_conf_write(sc->sc_pc, sc->sc_tag, PCI_CLASS_REG, cr);
2822 	/* Don't use cr, re-read the real register content instead */
2823 	interface = PCI_INTERFACE(pci_conf_read(sc->sc_pc, sc->sc_tag,
2824 	    PCI_CLASS_REG));
2825 
2826 	for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
2827 		cp = &sc->pciide_channels[channel];
2828 		if (pciide_chansetup(sc, channel, interface) == 0)
2829 			continue;
2830 		if ((interface & PCIIDE_CHAN_EN(channel)) == 0) {
2831 			printf("%s: %s channel ignored (disabled)\n",
2832 			    sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
2833 			continue;
2834 		}
2835 		pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize,
2836 		    acer_pci_intr);
2837 		if (cp->hw_ok == 0)
2838 			continue;
2839 		if (pciide_chan_candisable(cp)) {
2840 			cr &= ~(PCIIDE_CHAN_EN(channel) << PCI_INTERFACE_SHIFT);
2841 			pci_conf_write(sc->sc_pc, sc->sc_tag,
2842 			    PCI_CLASS_REG, cr);
2843 		}
2844 		pciide_map_compat_intr(pa, cp, channel, interface);
2845 		acer_setup_channel(&cp->wdc_channel);
2846 	}
2847 }
2848 
2849 void
2850 acer_setup_channel(chp)
2851 	struct channel_softc *chp;
2852 {
2853 	struct ata_drive_datas *drvp;
2854 	int drive;
2855 	u_int32_t acer_fifo_udma;
2856 	u_int32_t idedma_ctl;
2857 	struct pciide_channel *cp = (struct pciide_channel*)chp;
2858 	struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
2859 
2860 	idedma_ctl = 0;
2861 	acer_fifo_udma = pci_conf_read(sc->sc_pc, sc->sc_tag, ACER_FTH_UDMA);
2862 	WDCDEBUG_PRINT(("acer_setup_channel: old fifo/udma reg 0x%x\n",
2863 	    acer_fifo_udma), DEBUG_PROBE);
2864 	/* setup DMA if needed */
2865 	pciide_channel_dma_setup(cp);
2866 
2867 	for (drive = 0; drive < 2; drive++) {
2868 		drvp = &chp->ch_drive[drive];
2869 		/* If no drive, skip */
2870 		if ((drvp->drive_flags & DRIVE) == 0)
2871 			continue;
2872 		WDCDEBUG_PRINT(("acer_setup_channel: old timings reg for "
2873 		    "channel %d drive %d 0x%x\n", chp->channel, drive,
2874 		    pciide_pci_read(sc->sc_pc, sc->sc_tag,
2875 		    ACER_IDETIM(chp->channel, drive))), DEBUG_PROBE);
2876 		/* clear FIFO/DMA mode */
2877 		acer_fifo_udma &= ~(ACER_FTH_OPL(chp->channel, drive, 0x3) |
2878 		    ACER_UDMA_EN(chp->channel, drive) |
2879 		    ACER_UDMA_TIM(chp->channel, drive, 0x7));
2880 
2881 		/* add timing values, setup DMA if needed */
2882 		if ((drvp->drive_flags & DRIVE_DMA) == 0 &&
2883 		    (drvp->drive_flags & DRIVE_UDMA) == 0) {
2884 			acer_fifo_udma |=
2885 			    ACER_FTH_OPL(chp->channel, drive, 0x1);
2886 			goto pio;
2887 		}
2888 
2889 		acer_fifo_udma |= ACER_FTH_OPL(chp->channel, drive, 0x2);
2890 		if (drvp->drive_flags & DRIVE_UDMA) {
2891 			/* use Ultra/DMA */
2892 			drvp->drive_flags &= ~DRIVE_DMA;
2893 			acer_fifo_udma |= ACER_UDMA_EN(chp->channel, drive);
2894 			acer_fifo_udma |=
2895 			    ACER_UDMA_TIM(chp->channel, drive,
2896 				acer_udma[drvp->UDMA_mode]);
2897 		} else {
2898 			/*
2899 			 * use Multiword DMA
2900 			 * Timings will be used for both PIO and DMA,
2901 			 * so adjust DMA mode if needed
2902 			 */
2903 			if (drvp->PIO_mode > (drvp->DMA_mode + 2))
2904 				drvp->PIO_mode = drvp->DMA_mode + 2;
2905 			if (drvp->DMA_mode + 2 > (drvp->PIO_mode))
2906 				drvp->DMA_mode = (drvp->PIO_mode > 2) ?
2907 				    drvp->PIO_mode - 2 : 0;
2908 			if (drvp->DMA_mode == 0)
2909 				drvp->PIO_mode = 0;
2910 		}
2911 		idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
2912 pio:		pciide_pci_write(sc->sc_pc, sc->sc_tag,
2913 		    ACER_IDETIM(chp->channel, drive),
2914 		    acer_pio[drvp->PIO_mode]);
2915 	}
2916 	WDCDEBUG_PRINT(("acer_setup_channel: new fifo/udma reg 0x%x\n",
2917 	    acer_fifo_udma), DEBUG_PROBE);
2918 	pci_conf_write(sc->sc_pc, sc->sc_tag, ACER_FTH_UDMA, acer_fifo_udma);
2919 	if (idedma_ctl != 0) {
2920 		/* Add software bits in status register */
2921 		bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
2922 		    IDEDMA_CTL, idedma_ctl);
2923 	}
2924 	pciide_print_modes(cp);
2925 }
2926 
2927 int
2928 acer_pci_intr(arg)
2929 	void *arg;
2930 {
2931 	struct pciide_softc *sc = arg;
2932 	struct pciide_channel *cp;
2933 	struct channel_softc *wdc_cp;
2934 	int i, rv, crv;
2935 	u_int32_t chids;
2936 
2937 	rv = 0;
2938 	chids = pciide_pci_read(sc->sc_pc, sc->sc_tag, ACER_CHIDS);
2939 	for (i = 0; i < sc->sc_wdcdev.nchannels; i++) {
2940 		cp = &sc->pciide_channels[i];
2941 		wdc_cp = &cp->wdc_channel;
2942 		/* If a compat channel, skip. */
2943 		if (cp->compat)
2944 			continue;
2945 		if (chids & ACER_CHIDS_INT(i)) {
2946 			crv = wdcintr(wdc_cp);
2947 			if (crv == 0)
2948 				printf("%s:%d: bogus intr\n",
2949 				    sc->sc_wdcdev.sc_dev.dv_xname, i);
2950 			else
2951 				rv = 1;
2952 		}
2953 	}
2954 	return rv;
2955 }
2956 
2957 void
2958 hpt_chip_map(sc, pa)
2959 	struct pciide_softc *sc;
2960 	struct pci_attach_args *pa;
2961 {
2962 	struct pciide_channel *cp;
2963 	int i, compatchan, revision;
2964 	pcireg_t interface;
2965 	bus_size_t cmdsize, ctlsize;
2966 
2967 	if (pciide_chipen(sc, pa) == 0)
2968 		return;
2969 	revision = PCI_REVISION(pa->pa_class);
2970 
2971 	/*
2972 	 * When the chip is in native mode it identifies itself as a
2973 	 * 'misc mass storage' device; fake the interface in this case.
2974 	 */
2975 	if (PCI_SUBCLASS(pa->pa_class) == PCI_SUBCLASS_MASS_STORAGE_IDE) {
2976 		interface = PCI_INTERFACE(pa->pa_class);
2977 	} else {
2978 		interface = PCIIDE_INTERFACE_BUS_MASTER_DMA |
2979 		    PCIIDE_INTERFACE_PCI(0);
2980 		if (revision == HPT370_REV)
2981 			interface |= PCIIDE_INTERFACE_PCI(1);
2982 	}
2983 
2984 	printf("%s: bus-master DMA support present",
2985 		sc->sc_wdcdev.sc_dev.dv_xname);
2986 	pciide_mapreg_dma(sc, pa);
2987 	printf("\n");
2988 	sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
2989 	    WDC_CAPABILITY_MODE;
2990 	if (sc->sc_dma_ok) {
2991 		sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_UDMA;
2992 		sc->sc_wdcdev.cap |= WDC_CAPABILITY_IRQACK;
2993 		sc->sc_wdcdev.irqack = pciide_irqack;
2994 	}
2995 	sc->sc_wdcdev.PIO_cap = 4;
2996 	sc->sc_wdcdev.DMA_cap = 2;
2997 
2998 	sc->sc_wdcdev.set_modes = hpt_setup_channel;
2999 	sc->sc_wdcdev.channels = sc->wdc_chanarray;
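	/*
	 * The HPT366 appears as two single-channel PCI functions, with
	 * UDMA mode 4 at most; other revisions (HPT370) are a single
	 * two-channel function supporting UDMA mode 5.
	 */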
3000 	if (revision == HPT366_REV) {
3001 		sc->sc_wdcdev.UDMA_cap = 4;
3002 		/*
3003 		 * The 366 has 2 PCI IDE functions, one for primary and one
3004 		 * for secondary. So we need to call pciide_mapregs_compat()
3005 		 * with the real channel
3006 		 */
3007 		if (pa->pa_function == 0) {
3008 			compatchan = 0;
3009 		} else if (pa->pa_function == 1) {
3010 			compatchan = 1;
3011 		} else {
3012 			printf("%s: unexpected PCI function %d\n",
3013 			    sc->sc_wdcdev.sc_dev.dv_xname, pa->pa_function);
3014 			return;
3015 		}
3016 		sc->sc_wdcdev.nchannels = 1;
3017 	} else {
3018 		sc->sc_wdcdev.nchannels = 2;
3019 		sc->sc_wdcdev.UDMA_cap = 5;
3020 	}
3021 	for (i = 0; i < sc->sc_wdcdev.nchannels; i++) {
3022 		cp = &sc->pciide_channels[i];
3023 		if (sc->sc_wdcdev.nchannels > 1) {
3024 			compatchan = i;
3025 			if ((pciide_pci_read(sc->sc_pc, sc->sc_tag,
3026 			    HPT370_CTRL1(i)) & HPT370_CTRL1_EN) == 0) {
3027 				printf("%s: %s channel ignored (disabled)\n",
3028 				    sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
3029 				continue;
3030 			}
3031 		}
3032 		if (pciide_chansetup(sc, i, interface) == 0)
3033 			continue;
3034 		if (interface & PCIIDE_INTERFACE_PCI(i)) {
3035 			cp->hw_ok = pciide_mapregs_native(pa, cp, &cmdsize,
3036 			    &ctlsize, hpt_pci_intr);
3037 		} else {
3038 			cp->hw_ok = pciide_mapregs_compat(pa, cp, compatchan,
3039 			    &cmdsize, &ctlsize);
3040 		}
3041 		if (cp->hw_ok == 0)
3042 			return;
3043 		cp->wdc_channel.data32iot = cp->wdc_channel.cmd_iot;
3044 		cp->wdc_channel.data32ioh = cp->wdc_channel.cmd_ioh;
3045 		wdcattach(&cp->wdc_channel);
3046 		hpt_setup_channel(&cp->wdc_channel);
3047 	}
3048 	if (revision == HPT370_REV) {
3049 		/*
3050 		 * HPT370_REV has a bit to disable interrupts; make sure
3051 		 * to clear it.
3052 		 */
3053 		pciide_pci_write(sc->sc_pc, sc->sc_tag, HPT_CSEL,
3054 		    pciide_pci_read(sc->sc_pc, sc->sc_tag, HPT_CSEL) &
3055 		    ~HPT_CSEL_IRQDIS);
3056 	}
3057 	return;
3058 }
3059 
3060 void
3061 hpt_setup_channel(chp)
3062 	struct channel_softc *chp;
3063 {
3064 	struct ata_drive_datas *drvp;
3065 	int drive;
3066 	int cable;
3067 	u_int32_t before, after;
3068 	u_int32_t idedma_ctl;
3069 	struct pciide_channel *cp = (struct pciide_channel*)chp;
3070 	struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
3071 
3072 	cable = pciide_pci_read(sc->sc_pc, sc->sc_tag, HPT_CSEL);
3073 
3074 	/* setup DMA if needed */
3075 	pciide_channel_dma_setup(cp);
3076 
3077 	idedma_ctl = 0;
3078 
3079 	/* Per drive settings */
3080 	for (drive = 0; drive < 2; drive++) {
3081 		drvp = &chp->ch_drive[drive];
3082 		/* If no drive, skip */
3083 		if ((drvp->drive_flags & DRIVE) == 0)
3084 			continue;
3085 		before = pci_conf_read(sc->sc_pc, sc->sc_tag,
3086 					HPT_IDETIM(chp->channel, drive));
3087 
3088 		/* add timing values, setup DMA if needed */
3089 		if (drvp->drive_flags & DRIVE_UDMA) {
3090 			/* use Ultra/DMA */
3091 			drvp->drive_flags &= ~DRIVE_DMA;
3092 			if ((cable & HPT_CSEL_CBLID(chp->channel)) != 0 &&
3093 			    drvp->UDMA_mode > 2)
3094 				drvp->UDMA_mode = 2;
3095 			after = (sc->sc_wdcdev.nchannels == 2) ?
3096 			    hpt370_udma[drvp->UDMA_mode] :
3097 			    hpt366_udma[drvp->UDMA_mode];
3098 			idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
3099 		} else if (drvp->drive_flags & DRIVE_DMA) {
3100 			/*
3101 			 * use Multiword DMA.
3102 			 * Timings will be used for both PIO and DMA, so adjust
3103 			 * DMA mode if needed
3104 			 */
3105 			if (drvp->PIO_mode >= 3 &&
3106 			    (drvp->DMA_mode + 2) > drvp->PIO_mode) {
3107 				drvp->DMA_mode = drvp->PIO_mode - 2;
3108 			}
3109 			after = (sc->sc_wdcdev.nchannels == 2) ?
3110 			    hpt370_dma[drvp->DMA_mode] :
3111 			    hpt366_dma[drvp->DMA_mode];
3112 			idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
3113 		} else {
3114 			/* PIO only */
3115 			after = (sc->sc_wdcdev.nchannels == 2) ?
3116 			    hpt370_pio[drvp->PIO_mode] :
3117 			    hpt366_pio[drvp->PIO_mode];
3118 		}
3119 		pci_conf_write(sc->sc_pc, sc->sc_tag,
3120 		    HPT_IDETIM(chp->channel, drive), after);
3121 		WDCDEBUG_PRINT(("%s: bus speed register set to 0x%08x "
3122 		    "(BIOS 0x%08x)\n", drvp->drv_softc->dv_xname,
3123 		    after, before), DEBUG_PROBE);
3124 	}
3125 	if (idedma_ctl != 0) {
3126 		/* Add software bits in status register */
3127 		bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
3128 		    IDEDMA_CTL, idedma_ctl);
3129 	}
3130 	pciide_print_modes(cp);
3131 }
3132 
3133 int
3134 hpt_pci_intr(arg)
3135 	void *arg;
3136 {
3137 	struct pciide_softc *sc = arg;
3138 	struct pciide_channel *cp;
3139 	struct channel_softc *wdc_cp;
3140 	int rv = 0;
3141 	int dmastat, i, crv;
3142 
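	/*
	 * Only call wdcintr() for channels whose DMA status shows a
	 * pending interrupt; bogus interrupts are acked by writing the
	 * status back.
	 */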
3143 	for (i = 0; i < sc->sc_wdcdev.nchannels; i++) {
3144 		dmastat = bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh,
3145 		    IDEDMA_CTL + IDEDMA_SCH_OFFSET * i);
3146 		if ((dmastat & IDEDMA_CTL_INTR) == 0)
3147 			continue;
3148 		cp = &sc->pciide_channels[i];
3149 		wdc_cp = &cp->wdc_channel;
3150 		crv = wdcintr(wdc_cp);
3151 		if (crv == 0) {
3152 			printf("%s:%d: bogus intr\n",
3153 			    sc->sc_wdcdev.sc_dev.dv_xname, i);
3154 			bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
3155 			    IDEDMA_CTL + IDEDMA_SCH_OFFSET * i, dmastat);
3156 		} else
3157 			rv = 1;
3158 	}
3159 	return rv;
3160 }
3161 
3162 
3163 /* A macro to test product */
3164 #define PDC_IS_262(sc)							\
3165 	((sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA66 ||	\
3166 	(sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA100 ||	\
3167 	(sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA100X)
3168 
3169 void
3170 pdc202xx_chip_map(sc, pa)
3171 	struct pciide_softc *sc;
3172 	struct pci_attach_args *pa;
3173 {
3174 	struct pciide_channel *cp;
3175 	int channel;
3176 	pcireg_t interface, st, mode;
3177 	bus_size_t cmdsize, ctlsize;
3178 
3179 	st = pci_conf_read(sc->sc_pc, sc->sc_tag, PDC2xx_STATE);
3180 	WDCDEBUG_PRINT(("pdc202xx_setup_chip: controller state 0x%x\n", st),
3181 	    DEBUG_PROBE);
3182 	if (pciide_chipen(sc, pa) == 0)
3183 		return;
3184 
3185 	/* turn off RAID mode */
3186 	st &= ~PDC2xx_STATE_IDERAID;
3187 
3188 	/*
3189 	 * can't rely on the PCI_CLASS_REG content if the chip was in RAID
3190 	 * mode; we have to fake the interface
3191 	 */
3192 	interface = PCIIDE_INTERFACE_SETTABLE(0) | PCIIDE_INTERFACE_SETTABLE(1);
3193 	if (st & PDC2xx_STATE_NATIVE)
3194 		interface |= PCIIDE_INTERFACE_PCI(0) | PCIIDE_INTERFACE_PCI(1);
3195 
3196 	printf("%s: bus-master DMA support present",
3197 	    sc->sc_wdcdev.sc_dev.dv_xname);
3198 	pciide_mapreg_dma(sc, pa);
3199 	printf("\n");
3200 	sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
3201 	    WDC_CAPABILITY_MODE;
3202 	if (sc->sc_dma_ok) {
3203 		sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_UDMA;
3204 		sc->sc_wdcdev.cap |= WDC_CAPABILITY_IRQACK;
3205 		sc->sc_wdcdev.irqack = pciide_irqack;
3206 	}
3207 	sc->sc_wdcdev.PIO_cap = 4;
3208 	sc->sc_wdcdev.DMA_cap = 2;
3209 	if (PDC_IS_262(sc))
3210 		sc->sc_wdcdev.UDMA_cap = 4;
3211 	else
3212 		sc->sc_wdcdev.UDMA_cap = 2;
3213 	sc->sc_wdcdev.set_modes = pdc202xx_setup_channel;
3214 	sc->sc_wdcdev.channels = sc->wdc_chanarray;
3215 	sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
3216 
3217 	/* setup failsafe defaults */
3218 	mode = 0;
3219 	mode = PDC2xx_TIM_SET_PA(mode, pdc2xx_pa[0]);
3220 	mode = PDC2xx_TIM_SET_PB(mode, pdc2xx_pb[0]);
3221 	mode = PDC2xx_TIM_SET_MB(mode, pdc2xx_dma_mb[0]);
3222 	mode = PDC2xx_TIM_SET_MC(mode, pdc2xx_dma_mc[0]);
3223 	for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
3224 		WDCDEBUG_PRINT(("pdc202xx_setup_chip: channel %d drive 0 "
3225 		    "initial timings  0x%x, now 0x%x\n", channel,
3226 		    pci_conf_read(sc->sc_pc, sc->sc_tag,
3227 		    PDC2xx_TIM(channel, 0)), mode | PDC2xx_TIM_IORDYp),
3228 		    DEBUG_PROBE);
3229 		pci_conf_write(sc->sc_pc, sc->sc_tag, PDC2xx_TIM(channel, 0),
3230 		    mode | PDC2xx_TIM_IORDYp);
3231 		WDCDEBUG_PRINT(("pdc202xx_setup_chip: channel %d drive 1 "
3232 		    "initial timings  0x%x, now 0x%x\n", channel,
3233 		    pci_conf_read(sc->sc_pc, sc->sc_tag,
3234 		    PDC2xx_TIM(channel, 1)), mode), DEBUG_PROBE);
3235 		pci_conf_write(sc->sc_pc, sc->sc_tag, PDC2xx_TIM(channel, 1),
3236 		    mode);
3237 	}
3238 
3239 	mode = PDC2xx_SCR_DMA;
3240 	if (PDC_IS_262(sc)) {
3241 		mode = PDC2xx_SCR_SET_GEN(mode, PDC262_SCR_GEN_LAT);
3242 	} else {
3243 		/* the BIOS set it up this way */
3244 		mode = PDC2xx_SCR_SET_GEN(mode, 0x1);
3245 	}
3246 	mode = PDC2xx_SCR_SET_I2C(mode, 0x3); /* ditto */
3247 	mode = PDC2xx_SCR_SET_POLL(mode, 0x1); /* ditto */
3248 	WDCDEBUG_PRINT(("pdc202xx_setup_chip: initial SCR  0x%x, now 0x%x\n",
3249 	    bus_space_read_4(sc->sc_dma_iot, sc->sc_dma_ioh, PDC2xx_SCR), mode),
3250 	    DEBUG_PROBE);
3251 	bus_space_write_4(sc->sc_dma_iot, sc->sc_dma_ioh, PDC2xx_SCR, mode);
3252 
3253 	/* controller initial state register is OK even without BIOS */
3254 	/* Set DMA mode to IDE DMA compatibility */
3255 	mode = bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh, PDC2xx_PM);
3256 	WDCDEBUG_PRINT(("pdc202xx_setup_chip: primary mode 0x%x", mode ),
3257 	    DEBUG_PROBE);
3258 	bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh, PDC2xx_PM,
3259 	    mode | 0x1);
3260 	mode = bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh, PDC2xx_SM);
3261 	WDCDEBUG_PRINT((", secondary mode 0x%x\n", mode ), DEBUG_PROBE);
3262 	bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh, PDC2xx_SM,
3263 	    mode | 0x1);
3264 
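	/*
	 * The per-channel enable bits live in the state register and differ
	 * between chip generations (PDC262_STATE_EN vs. PDC246_STATE_EN).
	 * Channels the firmware left disabled are skipped; channels that
	 * pciide_chan_candisable() reports as unused get their enable bit
	 * cleared before the state register is written back.
	 */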
3265 	for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
3266 		cp = &sc->pciide_channels[channel];
3267 		if (pciide_chansetup(sc, channel, interface) == 0)
3268 			continue;
3269 		if ((st & (PDC_IS_262(sc) ?
3270 		    PDC262_STATE_EN(channel):PDC246_STATE_EN(channel))) == 0) {
3271 			printf("%s: %s channel ignored (disabled)\n",
3272 			    sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
3273 			continue;
3274 		}
3275 		pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize,
3276 		    pdc202xx_pci_intr);
3277 		if (cp->hw_ok == 0)
3278 			continue;
3279 		if (pciide_chan_candisable(cp))
3280 			st &= ~(PDC_IS_262(sc) ?
3281 			    PDC262_STATE_EN(channel):PDC246_STATE_EN(channel));
3282 		pciide_map_compat_intr(pa, cp, channel, interface);
3283 		pdc202xx_setup_channel(&cp->wdc_channel);
3284 	}
3285 	WDCDEBUG_PRINT(("pdc202xx_setup_chip: new controller state 0x%x\n", st),
3286 	    DEBUG_PROBE);
3287 	pci_conf_write(sc->sc_pc, sc->sc_tag, PDC2xx_STATE, st);
3288 	return;
3289 }
3290 
3291 void
3292 pdc202xx_setup_channel(chp)
3293 	struct channel_softc *chp;
3294 {
3295 	struct ata_drive_datas *drvp;
3296 	int drive;
3297 	pcireg_t mode, st;
3298 	u_int32_t idedma_ctl, scr, atapi;
3299 	struct pciide_channel *cp = (struct pciide_channel*)chp;
3300 	struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
3301 	int channel = chp->channel;
3302 
3303 	/* setup DMA if needed */
3304 	pciide_channel_dma_setup(cp);
3305 
3306 	idedma_ctl = 0;
3307 
3308 	/* Per channel settings */
3309 	if (PDC_IS_262(sc)) {
3310 		scr = bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh,
3311 		    PDC262_U66);
3312 		st = pci_conf_read(sc->sc_pc, sc->sc_tag, PDC2xx_STATE);
3313 		/* Trim UDMA mode */
3314 		if ((st & PDC262_STATE_80P(channel)) != 0 ||
3315 		    (chp->ch_drive[0].drive_flags & DRIVE_UDMA &&
3316 		    chp->ch_drive[0].UDMA_mode <= 2) ||
3317 		    (chp->ch_drive[1].drive_flags & DRIVE_UDMA &&
3318 		    chp->ch_drive[1].UDMA_mode <= 2)) {
3319 			if (chp->ch_drive[0].UDMA_mode > 2)
3320 				chp->ch_drive[0].UDMA_mode = 2;
3321 			if (chp->ch_drive[1].UDMA_mode > 2)
3322 				chp->ch_drive[1].UDMA_mode = 2;
3323 		}
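		/*
		 * The 66 MHz Ultra-DMA clock (PDC262_U66_EN below) is a
		 * per-channel setting, which is why both drives were just
		 * capped at UDMA mode 2 as soon as either one is limited to
		 * it.  PDC262_STATE_80P presumably reflects the cable type
		 * seen on the channel; when it is set the channel is held
		 * to UDMA 2 as well.
		 */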
3324 		/* Set U66 if needed */
3325 		if ((chp->ch_drive[0].drive_flags & DRIVE_UDMA &&
3326 		    chp->ch_drive[0].UDMA_mode > 2) ||
3327 		    (chp->ch_drive[1].drive_flags & DRIVE_UDMA &&
3328 		    chp->ch_drive[1].UDMA_mode > 2))
3329 			scr |= PDC262_U66_EN(channel);
3330 		else
3331 			scr &= ~PDC262_U66_EN(channel);
3332 		bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
3333 		    PDC262_U66, scr);
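		/*
		 * PDC262_ATAPI(channel) apparently selects whether ATAPI
		 * transfers on this channel may use Ultra-DMA timing.  It is
		 * only left off when the channel mixes one Ultra-DMA drive
		 * with one multiword-DMA drive; otherwise the UDMA-capable
		 * value is written.
		 */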
3334 		if (chp->ch_drive[0].drive_flags & DRIVE_ATAPI ||
3335 		    chp->ch_drive[1].drive_flags & DRIVE_ATAPI) {
3336 			if (((chp->ch_drive[0].drive_flags & DRIVE_UDMA) &&
3337 			    !(chp->ch_drive[1].drive_flags & DRIVE_UDMA) &&
3338 			    (chp->ch_drive[1].drive_flags & DRIVE_DMA)) ||
3339 			    ((chp->ch_drive[1].drive_flags & DRIVE_UDMA) &&
3340 			    !(chp->ch_drive[0].drive_flags & DRIVE_UDMA) &&
3341 			    (chp->ch_drive[0].drive_flags & DRIVE_DMA)))
3342 				atapi = 0;
3343 			else
3344 				atapi = PDC262_ATAPI_UDMA;
3345 			bus_space_write_4(sc->sc_dma_iot, sc->sc_dma_ioh,
3346 			    PDC262_ATAPI(channel), atapi);
3347 		}
3348 	}
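	/*
	 * Build the per-drive timing word: PA/PB come from the PIO tables
	 * (indexed by the PIO mode) and MB/MC from the multiword or Ultra
	 * DMA tables (indexed by the DMA mode), with PIO-only drives
	 * falling back to the slowest DMA entry.  PDC2xx_TIM_PRE
	 * (presumably prefetch) is set for ATA disks only, and IORDY
	 * sampling is enabled for PIO modes 3 and up, which require IORDY
	 * flow control.
	 */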
3349 	for (drive = 0; drive < 2; drive++) {
3350 		drvp = &chp->ch_drive[drive];
3351 		/* If no drive, skip */
3352 		if ((drvp->drive_flags & DRIVE) == 0)
3353 			continue;
3354 		mode = 0;
3355 		if (drvp->drive_flags & DRIVE_UDMA) {
3356 			/* use Ultra/DMA */
3357 			drvp->drive_flags &= ~DRIVE_DMA;
3358 			mode = PDC2xx_TIM_SET_MB(mode,
3359 			    pdc2xx_udma_mb[drvp->UDMA_mode]);
3360 			mode = PDC2xx_TIM_SET_MC(mode,
3361 			    pdc2xx_udma_mc[drvp->UDMA_mode]);
3363 			idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
3364 		} else if (drvp->drive_flags & DRIVE_DMA) {
3365 			mode = PDC2xx_TIM_SET_MB(mode,
3366 			    pdc2xx_dma_mb[drvp->DMA_mode]);
3367 			mode = PDC2xx_TIM_SET_MC(mode,
3368 			    pdc2xx_dma_mc[drvp->DMA_mode]);
3369 			idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
3370 		} else {
3371 			mode = PDC2xx_TIM_SET_MB(mode,
3372 			    pdc2xx_dma_mb[0]);
3373 			mode = PDC2xx_TIM_SET_MC(mode,
3374 			    pdc2xx_dma_mc[0]);
3375 		}
3376 		mode = PDC2xx_TIM_SET_PA(mode, pdc2xx_pa[drvp->PIO_mode]);
3377 		mode = PDC2xx_TIM_SET_PB(mode, pdc2xx_pb[drvp->PIO_mode]);
3378 		if (drvp->drive_flags & DRIVE_ATA)
3379 			mode |= PDC2xx_TIM_PRE;
3380 		mode |= PDC2xx_TIM_SYNC | PDC2xx_TIM_ERRDY;
3381 		if (drvp->PIO_mode >= 3) {
3382 			mode |= PDC2xx_TIM_IORDY;
3383 			if (drive == 0)
3384 				mode |= PDC2xx_TIM_IORDYp;
3385 		}
3386 		WDCDEBUG_PRINT(("pdc202xx_setup_channel: %s:%d:%d "
3387 		    "timings 0x%x\n",
3388 		    sc->sc_wdcdev.sc_dev.dv_xname,
3389 		    chp->channel, drive, mode), DEBUG_PROBE);
3390 		pci_conf_write(sc->sc_pc, sc->sc_tag,
3391 		    PDC2xx_TIM(chp->channel, drive), mode);
3392 	}
3393 	if (idedma_ctl != 0) {
3394 		/* Add software bits in status register */
3395 		bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
3396 		    IDEDMA_CTL, idedma_ctl);
3397 	}
3398 	pciide_print_modes(cp);
3399 }
3400 
3401 int
3402 pdc202xx_pci_intr(arg)
3403 	void *arg;
3404 {
3405 	struct pciide_softc *sc = arg;
3406 	struct pciide_channel *cp;
3407 	struct channel_softc *wdc_cp;
3408 	int i, rv, crv;
3409 	u_int32_t scr;
3410 
3411 	rv = 0;
3412 	scr = bus_space_read_4(sc->sc_dma_iot, sc->sc_dma_ioh, PDC2xx_SCR);
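	/*
	 * The chip reports pending interrupts per channel in SCR; only
	 * channels with their bit set are handed to wdcintr().  Channels
	 * mapped in compatibility mode are expected to be serviced through
	 * their compat interrupt handler and are skipped here.
	 */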
3413 	for (i = 0; i < sc->sc_wdcdev.nchannels; i++) {
3414 		cp = &sc->pciide_channels[i];
3415 		wdc_cp = &cp->wdc_channel;
3416 		/* If a compat channel, skip it. */
3417 		if (cp->compat)
3418 			continue;
3419 		if (scr & PDC2xx_SCR_INT(i)) {
3420 			crv = wdcintr(wdc_cp);
3421 			if (crv == 0)
3422 				printf("%s:%d: bogus intr\n",
3423 				    sc->sc_wdcdev.sc_dev.dv_xname, i);
3424 			else
3425 				rv = 1;
3426 		}
3427 	}
3428 	return rv;
3429 }
3430 
3431 void
3432 opti_chip_map(sc, pa)
3433 	struct pciide_softc *sc;
3434 	struct pci_attach_args *pa;
3435 {
3436 	struct pciide_channel *cp;
3437 	bus_size_t cmdsize, ctlsize;
3438 	pcireg_t interface;
3439 	u_int8_t init_ctrl;
3440 	int channel;
3441 
3442 	if (pciide_chipen(sc, pa) == 0)
3443 		return;
3444 	printf("%s: bus-master DMA support present",
3445 	    sc->sc_wdcdev.sc_dev.dv_xname);
3446 	pciide_mapreg_dma(sc, pa);
3447 	printf("\n");
3448 
3449 	sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
3450 	    WDC_CAPABILITY_MODE;
3451 	sc->sc_wdcdev.PIO_cap = 4;
3452 	if (sc->sc_dma_ok) {
3453 		sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_IRQACK;
3454 		sc->sc_wdcdev.irqack = pciide_irqack;
3455 		sc->sc_wdcdev.DMA_cap = 2;
3456 	}
3457 	sc->sc_wdcdev.set_modes = opti_setup_channel;
3458 
3459 	sc->sc_wdcdev.channels = sc->wdc_chanarray;
3460 	sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
3461 
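	/*
	 * The init-control register tells us whether the firmware disabled
	 * the secondary channel (OPTI_INIT_CONTROL_CH2_DISABLE); such a
	 * channel is skipped in the loop below.
	 */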
3462 	init_ctrl = pciide_pci_read(sc->sc_pc, sc->sc_tag,
3463 	    OPTI_REG_INIT_CONTROL);
3464 
3465 	interface = PCI_INTERFACE(pa->pa_class);
3466 
3467 	for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
3468 		cp = &sc->pciide_channels[channel];
3469 		if (pciide_chansetup(sc, channel, interface) == 0)
3470 			continue;
3471 		if (channel == 1 &&
3472 		    (init_ctrl & OPTI_INIT_CONTROL_CH2_DISABLE) != 0) {
3473 			printf("%s: %s channel ignored (disabled)\n",
3474 			    sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
3475 			continue;
3476 		}
3477 		pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize,
3478 		    pciide_pci_intr);
3479 		if (cp->hw_ok == 0)
3480 			continue;
3481 		pciide_map_compat_intr(pa, cp, channel, interface);
3482 		if (cp->hw_ok == 0)
3483 			continue;
3484 		opti_setup_channel(&cp->wdc_channel);
3485 	}
3486 }
3487 
3488 void
3489 opti_setup_channel(chp)
3490 	struct channel_softc *chp;
3491 {
3492 	struct ata_drive_datas *drvp;
3493 	struct pciide_channel *cp = (struct pciide_channel*)chp;
3494 	struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
3495 	int drive, spd;
3496 	int mode[2];
3497 	u_int8_t rv, mr;
3498 
3499 	/*
3500 	 * Clear the `Delay', `Address Setup Time' and index fields of the
3501 	 * Miscellaneous Register before programming the drives below.
3502 	 */
3503 	mr = opti_read_config(chp, OPTI_REG_MISC) & ~OPTI_MISC_INDEX_MASK;
3504 	mr &= ~(OPTI_MISC_DELAY_MASK |
3505 		OPTI_MISC_ADDR_SETUP_MASK |
3506 		OPTI_MISC_INDEX_MASK);
3507 
3508 	/* Prime the control register before setting timing values */
3509 	opti_write_config(chp, OPTI_REG_CONTROL, OPTI_CONTROL_DISABLE);
3510 
3511 	/* Determine the clock rate of the PCI bus the chip is attached to */
3512 	spd = (int) opti_read_config(chp, OPTI_REG_STRAP);
3513 	spd &= OPTI_STRAP_PCI_SPEED_MASK;
3514 
3515 	/* setup DMA if needed */
3516 	pciide_channel_dma_setup(cp);
3517 
3518 	for (drive = 0; drive < 2; drive++) {
3519 		drvp = &chp->ch_drive[drive];
3520 		/* If no drive, skip */
3521 		if ((drvp->drive_flags & DRIVE) == 0) {
3522 			mode[drive] = -1;
3523 			continue;
3524 		}
3525 
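		/*
		 * The opti_tim_* tables are indexed by a single mode number:
		 * entries 0-4 are the PIO modes and, judging from the
		 * "DMA_mode + 5" below, the following entries are the
		 * multiword DMA modes.  Since one set of cycle timings
		 * serves both PIO and DMA, the modes are first adjusted so
		 * that PIO_mode ends up equal to DMA_mode + 2 (or both drop
		 * to mode 0).
		 */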
3526 		if ((drvp->drive_flags & DRIVE_DMA)) {
3527 			/*
3528 			 * Timings will be used for both PIO and DMA,
3529 			 * so adjust DMA mode if needed
3530 			 */
3531 			if (drvp->PIO_mode > (drvp->DMA_mode + 2))
3532 				drvp->PIO_mode = drvp->DMA_mode + 2;
3533 			if (drvp->DMA_mode + 2 > (drvp->PIO_mode))
3534 				drvp->DMA_mode = (drvp->PIO_mode > 2) ?
3535 				    drvp->PIO_mode - 2 : 0;
3536 			if (drvp->DMA_mode == 0)
3537 				drvp->PIO_mode = 0;
3538 
3539 			mode[drive] = drvp->DMA_mode + 5;
3540 		} else
3541 			mode[drive] = drvp->PIO_mode;
3542 
3543 		if (drive && mode[0] >= 0 &&
3544 		    (opti_tim_as[spd][mode[0]] != opti_tim_as[spd][mode[1]])) {
3545 			/*
3546 			 * Can't have two drives using different values
3547 			 * for `Address Setup Time'.
3548 			 * Slow down the faster drive to compensate.
3549 			 */
3550 			int d = (opti_tim_as[spd][mode[0]] >
3551 				 opti_tim_as[spd][mode[1]]) ?  0 : 1;
3552 
3553 			mode[d] = mode[1-d];
3554 			chp->ch_drive[d].PIO_mode = chp->ch_drive[1-d].PIO_mode;
3555 			chp->ch_drive[d].DMA_mode = 0;
3556 			chp->ch_drive[d].drive_flags &= ~DRIVE_DMA;
3557 		}
3558 	}
3559 
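	/*
	 * Now program the hardware: the Address Setup Time goes into the
	 * banked Miscellaneous Register (bank selected by
	 * OPTI_MISC_INDEX(drive)), the pulse width and recovery time are
	 * written identically to the read and write cycle timing registers,
	 * and the Enhanced Mode register gets the per-drive selection from
	 * opti_tim_em[].
	 */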
3560 	for (drive = 0; drive < 2; drive++) {
3561 		int m;
3562 		if ((m = mode[drive]) < 0)
3563 			continue;
3564 
3565 		/* Set the Address Setup Time and select appropriate index */
3566 		rv = opti_tim_as[spd][m] << OPTI_MISC_ADDR_SETUP_SHIFT;
3567 		rv |= OPTI_MISC_INDEX(drive);
3568 		opti_write_config(chp, OPTI_REG_MISC, mr | rv);
3569 
3570 		/* Set the pulse width and recovery timing parameters */
3571 		rv  = opti_tim_cp[spd][m] << OPTI_PULSE_WIDTH_SHIFT;
3572 		rv |= opti_tim_rt[spd][m] << OPTI_RECOVERY_TIME_SHIFT;
3573 		opti_write_config(chp, OPTI_REG_READ_CYCLE_TIMING, rv);
3574 		opti_write_config(chp, OPTI_REG_WRITE_CYCLE_TIMING, rv);
3575 
3576 		/* Set the Enhanced Mode register appropriately */
3577 		rv = pciide_pci_read(sc->sc_pc, sc->sc_tag, OPTI_REG_ENH_MODE);
3578 		rv &= ~OPTI_ENH_MODE_MASK(chp->channel, drive);
3579 		rv |= OPTI_ENH_MODE(chp->channel, drive, opti_tim_em[m]);
3580 		pciide_pci_write(sc->sc_pc, sc->sc_tag, OPTI_REG_ENH_MODE, rv);
3581 	}
3582 
3583 	/* Finally, enable the timings */
3584 	opti_write_config(chp, OPTI_REG_CONTROL, OPTI_CONTROL_ENABLE);
3585 
3586 	pciide_print_modes(cp);
3587 }
3588