xref: /netbsd-src/sys/dev/pci/pciide.c (revision e55cffd8e520e9b03f18a1bd98bb04223e79f69f)
1 /*	$NetBSD: pciide.c,v 1.112 2001/04/21 16:36:37 tsutsui Exp $	*/
2 
3 
4 /*
5  * Copyright (c) 1999 Manuel Bouyer.
6  *
7  * Redistribution and use in source and binary forms, with or without
8  * modification, are permitted provided that the following conditions
9  * are met:
10  * 1. Redistributions of source code must retain the above copyright
11  *    notice, this list of conditions and the following disclaimer.
12  * 2. Redistributions in binary form must reproduce the above copyright
13  *    notice, this list of conditions and the following disclaimer in the
14  *    documentation and/or other materials provided with the distribution.
15  * 3. All advertising materials mentioning features or use of this software
16  *    must display the following acknowledgement:
17  *	This product includes software developed by the University of
18  *	California, Berkeley and its contributors.
19  * 4. Neither the name of the University nor the names of its contributors
20  *    may be used to endorse or promote products derived from this software
21  *    without specific prior written permission.
22  *
23  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
24  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
25  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
26  * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
27  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
28  * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
29  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
30  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
31  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
32  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
33  *
34  */
35 
36 
37 /*
38  * Copyright (c) 1996, 1998 Christopher G. Demetriou.  All rights reserved.
39  *
40  * Redistribution and use in source and binary forms, with or without
41  * modification, are permitted provided that the following conditions
42  * are met:
43  * 1. Redistributions of source code must retain the above copyright
44  *    notice, this list of conditions and the following disclaimer.
45  * 2. Redistributions in binary form must reproduce the above copyright
46  *    notice, this list of conditions and the following disclaimer in the
47  *    documentation and/or other materials provided with the distribution.
48  * 3. All advertising materials mentioning features or use of this software
49  *    must display the following acknowledgement:
50  *      This product includes software developed by Christopher G. Demetriou
51  *	for the NetBSD Project.
52  * 4. The name of the author may not be used to endorse or promote products
53  *    derived from this software without specific prior written permission
54  *
55  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
56  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
57  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
58  * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
59  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
60  * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
61  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
62  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
63  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
64  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
65  */
66 
67 /*
68  * PCI IDE controller driver.
69  *
70  * Author: Christopher G. Demetriou, March 2, 1998 (derived from NetBSD
71  * sys/dev/pci/ppb.c, revision 1.16).
72  *
73  * See "PCI IDE Controller Specification, Revision 1.0 3/4/94" and
74  * "Programming Interface for Bus Master IDE Controller, Revision 1.0
75  * 5/16/94" from the PCI SIG.
76  *
77  */
78 
79 #ifndef WDCDEBUG
80 #define WDCDEBUG
81 #endif
82 
83 #define DEBUG_DMA   0x01
84 #define DEBUG_XFERS  0x02
85 #define DEBUG_FUNCS  0x08
86 #define DEBUG_PROBE  0x10
87 #ifdef WDCDEBUG
88 int wdcdebug_pciide_mask = 0;
89 #define WDCDEBUG_PRINT(args, level) \
90 	if (wdcdebug_pciide_mask & (level)) printf args
91 #else
92 #define WDCDEBUG_PRINT(args, level)
93 #endif
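/*
 * Example use: WDCDEBUG_PRINT(("pciide: probing\n"), DEBUG_PROBE) only
 * prints when the DEBUG_PROBE bit is set in wdcdebug_pciide_mask.
 */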
94 #include <sys/param.h>
95 #include <sys/systm.h>
96 #include <sys/device.h>
97 #include <sys/malloc.h>
98 
99 #include <uvm/uvm_extern.h>
100 
101 #include <machine/endian.h>
102 
103 #include <dev/pci/pcireg.h>
104 #include <dev/pci/pcivar.h>
105 #include <dev/pci/pcidevs.h>
106 #include <dev/pci/pciidereg.h>
107 #include <dev/pci/pciidevar.h>
108 #include <dev/pci/pciide_piix_reg.h>
109 #include <dev/pci/pciide_amd_reg.h>
110 #include <dev/pci/pciide_apollo_reg.h>
111 #include <dev/pci/pciide_cmd_reg.h>
112 #include <dev/pci/pciide_cy693_reg.h>
113 #include <dev/pci/pciide_sis_reg.h>
114 #include <dev/pci/pciide_acer_reg.h>
115 #include <dev/pci/pciide_pdc202xx_reg.h>
116 #include <dev/pci/pciide_opti_reg.h>
117 #include <dev/pci/pciide_hpt_reg.h>
118 #include <dev/pci/pciide_acard_reg.h>
119 #include <dev/pci/cy82c693var.h>
120 
121 #include "opt_pciide.h"
122 
123 /* inlines for reading/writing 8-bit PCI registers */
124 static __inline u_int8_t pciide_pci_read __P((pci_chipset_tag_t, pcitag_t,
125 					      int));
126 static __inline void pciide_pci_write __P((pci_chipset_tag_t, pcitag_t,
127 					   int, u_int8_t));
128 
129 static __inline u_int8_t
130 pciide_pci_read(pc, pa, reg)
131 	pci_chipset_tag_t pc;
132 	pcitag_t pa;
133 	int reg;
134 {
135 
136 	return (pci_conf_read(pc, pa, (reg & ~0x03)) >>
137 	    ((reg & 0x03) * 8) & 0xff);
138 }
139 
140 static __inline void
141 pciide_pci_write(pc, pa, reg, val)
142 	pci_chipset_tag_t pc;
143 	pcitag_t pa;
144 	int reg;
145 	u_int8_t val;
146 {
147 	pcireg_t pcival;
148 
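	/*
	 * PCI config space is only addressable as aligned 32-bit words, so
	 * emulate a byte write with a read-modify-write of the containing
	 * dword, touching only the selected byte lane.
	 */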
149 	pcival = pci_conf_read(pc, pa, (reg & ~0x03));
150 	pcival &= ~(0xff << ((reg & 0x03) * 8));
151 	pcival |= (val << ((reg & 0x03) * 8));
152 	pci_conf_write(pc, pa, (reg & ~0x03), pcival);
153 }
154 
155 void default_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
156 
157 void piix_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
158 void piix_setup_channel __P((struct channel_softc*));
159 void piix3_4_setup_channel __P((struct channel_softc*));
160 static u_int32_t piix_setup_idetim_timings __P((u_int8_t, u_int8_t, u_int8_t));
161 static u_int32_t piix_setup_idetim_drvs __P((struct ata_drive_datas*));
162 static u_int32_t piix_setup_sidetim_timings __P((u_int8_t, u_int8_t, u_int8_t));
163 
164 void amd756_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
165 void amd756_setup_channel __P((struct channel_softc*));
166 
167 void apollo_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
168 void apollo_setup_channel __P((struct channel_softc*));
169 
170 void cmd_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
171 void cmd0643_9_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
172 void cmd0643_9_setup_channel __P((struct channel_softc*));
173 void cmd_channel_map __P((struct pci_attach_args *,
174 			struct pciide_softc *, int));
175 int  cmd_pci_intr __P((void *));
176 void cmd646_9_irqack __P((struct channel_softc *));
177 
178 void cy693_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
179 void cy693_setup_channel __P((struct channel_softc*));
180 
181 void sis_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
182 void sis_setup_channel __P((struct channel_softc*));
183 
184 void acer_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
185 void acer_setup_channel __P((struct channel_softc*));
186 int  acer_pci_intr __P((void *));
187 
188 void pdc202xx_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
189 void pdc202xx_setup_channel __P((struct channel_softc*));
190 int  pdc202xx_pci_intr __P((void *));
191 int  pdc20265_pci_intr __P((void *));
192 
193 void opti_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
194 void opti_setup_channel __P((struct channel_softc*));
195 
196 void hpt_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
197 void hpt_setup_channel __P((struct channel_softc*));
198 int  hpt_pci_intr __P((void *));
199 
200 void acard_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
201 void acard_setup_channel __P((struct channel_softc*));
202 int  acard_pci_intr __P((void *));
203 
204 void pciide_channel_dma_setup __P((struct pciide_channel *));
205 int  pciide_dma_table_setup __P((struct pciide_softc*, int, int));
206 int  pciide_dma_init __P((void*, int, int, void *, size_t, int));
207 void pciide_dma_start __P((void*, int, int));
208 int  pciide_dma_finish __P((void*, int, int, int));
209 void pciide_irqack __P((struct channel_softc *));
210 void pciide_print_modes __P((struct pciide_channel *));
211 
212 struct pciide_product_desc {
213 	u_int32_t ide_product;
214 	int ide_flags;
215 	const char *ide_name;
216 	/* map and setup chip, probe drives */
217 	void (*chip_map) __P((struct pciide_softc*, struct pci_attach_args*));
218 };
219 
220 /* Flags for ide_flags */
221 #define IDE_PCI_CLASS_OVERRIDE	0x0001 /* accept even if class != pciide */
222 #define	IDE_16BIT_IOSPACE	0x0002 /* I/O space BARS ignore upper word */
223 
224 /* Default product description for devices not known to this driver */
225 const struct pciide_product_desc default_product_desc = {
226 	0,
227 	0,
228 	"Generic PCI IDE controller",
229 	default_chip_map,
230 };
231 
232 const struct pciide_product_desc pciide_intel_products[] =  {
233 	{ PCI_PRODUCT_INTEL_82092AA,
234 	  0,
235 	  "Intel 82092AA IDE controller",
236 	  default_chip_map,
237 	},
238 	{ PCI_PRODUCT_INTEL_82371FB_IDE,
239 	  0,
240 	  "Intel 82371FB IDE controller (PIIX)",
241 	  piix_chip_map,
242 	},
243 	{ PCI_PRODUCT_INTEL_82371SB_IDE,
244 	  0,
245 	  "Intel 82371SB IDE Interface (PIIX3)",
246 	  piix_chip_map,
247 	},
248 	{ PCI_PRODUCT_INTEL_82371AB_IDE,
249 	  0,
250 	  "Intel 82371AB IDE controller (PIIX4)",
251 	  piix_chip_map,
252 	},
253 	{ PCI_PRODUCT_INTEL_82440MX_IDE,
254 	  0,
255 	  "Intel 82440MX IDE controller",
256 	  piix_chip_map
257 	},
258 	{ PCI_PRODUCT_INTEL_82801AA_IDE,
259 	  0,
260 	  "Intel 82801AA IDE Controller (ICH)",
261 	  piix_chip_map,
262 	},
263 	{ PCI_PRODUCT_INTEL_82801AB_IDE,
264 	  0,
265 	  "Intel 82801AB IDE Controller (ICH0)",
266 	  piix_chip_map,
267 	},
268 	{ PCI_PRODUCT_INTEL_82801BA_IDE,
269 	  0,
270 	  "Intel 82801BA IDE Controller (ICH2)",
271 	  piix_chip_map,
272 	},
273 	{ PCI_PRODUCT_INTEL_82801BAM_IDE,
274 	  0,
275 	  "Intel 82801BAM IDE Controller (ICH2)",
276 	  piix_chip_map,
277 	},
278 	{ 0,
279 	  0,
280 	  NULL,
281 	}
282 };
283 
284 const struct pciide_product_desc pciide_amd_products[] =  {
285 	{ PCI_PRODUCT_AMD_PBC756_IDE,
286 	  0,
287 	  "Advanced Micro Devices AMD756 IDE Controller",
288 	  amd756_chip_map
289 	},
290 	{ 0,
291 	  0,
292 	  NULL,
293 	}
294 };
295 
296 const struct pciide_product_desc pciide_cmd_products[] =  {
297 	{ PCI_PRODUCT_CMDTECH_640,
298 	  0,
299 	  "CMD Technology PCI0640",
300 	  cmd_chip_map
301 	},
302 	{ PCI_PRODUCT_CMDTECH_643,
303 	  0,
304 	  "CMD Technology PCI0643",
305 	  cmd0643_9_chip_map,
306 	},
307 	{ PCI_PRODUCT_CMDTECH_646,
308 	  0,
309 	  "CMD Technology PCI0646",
310 	  cmd0643_9_chip_map,
311 	},
312 	{ PCI_PRODUCT_CMDTECH_648,
313 	  IDE_PCI_CLASS_OVERRIDE,
314 	  "CMD Technology PCI0648",
315 	  cmd0643_9_chip_map,
316 	},
317 	{ PCI_PRODUCT_CMDTECH_649,
318 	  IDE_PCI_CLASS_OVERRIDE,
319 	  "CMD Technology PCI0649",
320 	  cmd0643_9_chip_map,
321 	},
322 	{ 0,
323 	  0,
324 	  NULL,
325 	}
326 };
327 
328 const struct pciide_product_desc pciide_via_products[] =  {
329 	{ PCI_PRODUCT_VIATECH_VT82C586_IDE,
330 	  0,
331 	  "VIA Tech VT82C586 IDE Controller",
332 	  apollo_chip_map,
333 	 },
334 	{ PCI_PRODUCT_VIATECH_VT82C586A_IDE,
335 	  0,
336 	  "VIA Tech VT82C586A IDE Controller",
337 	  apollo_chip_map,
338 	},
339 	{ 0,
340 	  0,
341 	  NULL,
342 	}
343 };
344 
345 const struct pciide_product_desc pciide_cypress_products[] =  {
346 	{ PCI_PRODUCT_CONTAQ_82C693,
347 	  IDE_16BIT_IOSPACE,
348 	  "Cypress 82C693 IDE Controller",
349 	  cy693_chip_map,
350 	},
351 	{ 0,
352 	  0,
353 	  NULL,
354 	}
355 };
356 
357 const struct pciide_product_desc pciide_sis_products[] =  {
358 	{ PCI_PRODUCT_SIS_5597_IDE,
359 	  0,
360 	  "Silicon Integrated System 5597/5598 IDE controller",
361 	  sis_chip_map,
362 	},
363 	{ 0,
364 	  0,
365 	  NULL,
366 	}
367 };
368 
369 const struct pciide_product_desc pciide_acer_products[] =  {
370 	{ PCI_PRODUCT_ALI_M5229,
371 	  0,
372 	  "Acer Labs M5229 UDMA IDE Controller",
373 	  acer_chip_map,
374 	},
375 	{ 0,
376 	  0,
377 	  NULL,
378 	}
379 };
380 
381 const struct pciide_product_desc pciide_promise_products[] =  {
382 	{ PCI_PRODUCT_PROMISE_ULTRA33,
383 	  IDE_PCI_CLASS_OVERRIDE,
384 	  "Promise Ultra33/ATA Bus Master IDE Accelerator",
385 	  pdc202xx_chip_map,
386 	},
387 	{ PCI_PRODUCT_PROMISE_ULTRA66,
388 	  IDE_PCI_CLASS_OVERRIDE,
389 	  "Promise Ultra66/ATA Bus Master IDE Accelerator",
390 	  pdc202xx_chip_map,
391 	},
392 	{ PCI_PRODUCT_PROMISE_ULTRA100,
393 	  IDE_PCI_CLASS_OVERRIDE,
394 	  "Promise Ultra100/ATA Bus Master IDE Accelerator",
395 	  pdc202xx_chip_map,
396 	},
397 	{ PCI_PRODUCT_PROMISE_ULTRA100X,
398 	  IDE_PCI_CLASS_OVERRIDE,
399 	  "Promise Ultra100/ATA Bus Master IDE Accelerator",
400 	  pdc202xx_chip_map,
401 	},
402 	{ 0,
403 	  0,
404 	  NULL,
405 	}
406 };
407 
408 const struct pciide_product_desc pciide_opti_products[] =  {
409 	{ PCI_PRODUCT_OPTI_82C621,
410 	  0,
411 	  "OPTi 82c621 PCI IDE controller",
412 	  opti_chip_map,
413 	},
414 	{ PCI_PRODUCT_OPTI_82C568,
415 	  0,
416 	  "OPTi 82c568 (82c621 compatible) PCI IDE controller",
417 	  opti_chip_map,
418 	},
419 	{ PCI_PRODUCT_OPTI_82D568,
420 	  0,
421 	  "OPTi 82d568 (82c621 compatible) PCI IDE controller",
422 	  opti_chip_map,
423 	},
424 	{ 0,
425 	  0,
426 	  NULL,
427 	}
428 };
429 
430 const struct pciide_product_desc pciide_triones_products[] =  {
431 	{ PCI_PRODUCT_TRIONES_HPT366,
432 	  IDE_PCI_CLASS_OVERRIDE,
433 	  "Triones/Highpoint HPT366/370 IDE Controller",
434 	  hpt_chip_map,
435 	},
436 	{ 0,
437 	  0,
438 	  NULL,
439 	}
440 };
441 
442 const struct pciide_product_desc pciide_acard_products[] =  {
443 	{ PCI_PRODUCT_ACARD_ATP850U,
444 	  IDE_PCI_CLASS_OVERRIDE,
445 	  "Acard ATP850U Ultra33 IDE Controller",
446 	  acard_chip_map,
447 	},
448 	{ PCI_PRODUCT_ACARD_ATP860,
449 	  IDE_PCI_CLASS_OVERRIDE,
450 	  "Acard ATP860 Ultra66 IDE Controller",
451 	  acard_chip_map,
452 	},
453 	{ PCI_PRODUCT_ACARD_ATP860A,
454 	  IDE_PCI_CLASS_OVERRIDE,
455 	  "Acard ATP860-A Ultra66 IDE Controller",
456 	  acard_chip_map,
457 	},
458 	{ 0,
459 	  0,
460 	  NULL,
461 	}
462 };
463 
464 struct pciide_vendor_desc {
465 	u_int32_t ide_vendor;
466 	const struct pciide_product_desc *ide_products;
467 };
468 
469 const struct pciide_vendor_desc pciide_vendors[] = {
470 	{ PCI_VENDOR_INTEL, pciide_intel_products },
471 	{ PCI_VENDOR_CMDTECH, pciide_cmd_products },
472 	{ PCI_VENDOR_VIATECH, pciide_via_products },
473 	{ PCI_VENDOR_CONTAQ, pciide_cypress_products },
474 	{ PCI_VENDOR_SIS, pciide_sis_products },
475 	{ PCI_VENDOR_ALI, pciide_acer_products },
476 	{ PCI_VENDOR_PROMISE, pciide_promise_products },
477 	{ PCI_VENDOR_AMD, pciide_amd_products },
478 	{ PCI_VENDOR_OPTI, pciide_opti_products },
479 	{ PCI_VENDOR_TRIONES, pciide_triones_products },
480 #ifdef PCIIDE_ACARD_ENABLE
481 	{ PCI_VENDOR_ACARD, pciide_acard_products },
482 #endif
483 	{ 0, NULL }
484 };
485 
486 /* options passed via the 'flags' config keyword */
487 #define PCIIDE_OPTIONS_DMA	0x01
488 
489 int	pciide_match __P((struct device *, struct cfdata *, void *));
490 void	pciide_attach __P((struct device *, struct device *, void *));
491 
492 struct cfattach pciide_ca = {
493 	sizeof(struct pciide_softc), pciide_match, pciide_attach
494 };
495 int	pciide_chipen __P((struct pciide_softc *, struct pci_attach_args *));
496 int	pciide_mapregs_compat __P(( struct pci_attach_args *,
497 	    struct pciide_channel *, int, bus_size_t *, bus_size_t*));
498 int	pciide_mapregs_native __P((struct pci_attach_args *,
499 	    struct pciide_channel *, bus_size_t *, bus_size_t *,
500 	    int (*pci_intr) __P((void *))));
501 void	pciide_mapreg_dma __P((struct pciide_softc *,
502 	    struct pci_attach_args *));
503 int	pciide_chansetup __P((struct pciide_softc *, int, pcireg_t));
504 void	pciide_mapchan __P((struct pci_attach_args *,
505 	    struct pciide_channel *, pcireg_t, bus_size_t *, bus_size_t *,
506 	    int (*pci_intr) __P((void *))));
507 int	pciide_chan_candisable __P((struct pciide_channel *));
508 void	pciide_map_compat_intr __P(( struct pci_attach_args *,
509 	    struct pciide_channel *, int, int));
510 int	pciide_print __P((void *, const char *pnp));
511 int	pciide_compat_intr __P((void *));
512 int	pciide_pci_intr __P((void *));
513 const struct pciide_product_desc* pciide_lookup_product __P((u_int32_t));
514 
515 const struct pciide_product_desc *
516 pciide_lookup_product(id)
517 	u_int32_t id;
518 {
519 	const struct pciide_product_desc *pp;
520 	const struct pciide_vendor_desc *vp;
521 
522 	for (vp = pciide_vendors; vp->ide_products != NULL; vp++)
523 		if (PCI_VENDOR(id) == vp->ide_vendor)
524 			break;
525 
526 	if ((pp = vp->ide_products) == NULL)
527 		return NULL;
528 
529 	for (; pp->ide_name != NULL; pp++)
530 		if (PCI_PRODUCT(id) == pp->ide_product)
531 			break;
532 
533 	if (pp->ide_name == NULL)
534 		return NULL;
535 	return pp;
536 }
537 
538 int
539 pciide_match(parent, match, aux)
540 	struct device *parent;
541 	struct cfdata *match;
542 	void *aux;
543 {
544 	struct pci_attach_args *pa = aux;
545 	const struct pciide_product_desc *pp;
546 
547 	/*
548 	 * Check the class code to see that it's a PCI IDE controller.
549 	 * If it is, we assume that we can deal with it; it _should_
550 	 * work in a standardized way...
551 	 */
552 	if (PCI_CLASS(pa->pa_class) == PCI_CLASS_MASS_STORAGE &&
553 	    PCI_SUBCLASS(pa->pa_class) == PCI_SUBCLASS_MASS_STORAGE_IDE) {
554 		return (1);
555 	}
556 
557 	/*
558 	 * Some controllers (e.g. the Promise Ultra-33) don't claim to be PCI
559 	 * IDE controllers. Let's see if we can deal with them anyway.
560 	 */
561 	pp = pciide_lookup_product(pa->pa_id);
562 	if (pp  && (pp->ide_flags & IDE_PCI_CLASS_OVERRIDE)) {
563 		return (1);
564 	}
565 
566 	return (0);
567 }
568 
569 void
570 pciide_attach(parent, self, aux)
571 	struct device *parent, *self;
572 	void *aux;
573 {
574 	struct pci_attach_args *pa = aux;
575 	pci_chipset_tag_t pc = pa->pa_pc;
576 	pcitag_t tag = pa->pa_tag;
577 	struct pciide_softc *sc = (struct pciide_softc *)self;
578 	pcireg_t csr;
579 	char devinfo[256];
580 	const char *displaydev;
581 
582 	sc->sc_pp = pciide_lookup_product(pa->pa_id);
583 	if (sc->sc_pp == NULL) {
584 		sc->sc_pp = &default_product_desc;
585 		pci_devinfo(pa->pa_id, pa->pa_class, 0, devinfo);
586 		displaydev = devinfo;
587 	} else
588 		displaydev = sc->sc_pp->ide_name;
589 
590 	printf(": %s (rev. 0x%02x)\n", displaydev, PCI_REVISION(pa->pa_class));
591 
592 	sc->sc_pc = pa->pa_pc;
593 	sc->sc_tag = pa->pa_tag;
594 #ifdef WDCDEBUG
595 	if (wdcdebug_pciide_mask & DEBUG_PROBE)
596 		pci_conf_print(sc->sc_pc, sc->sc_tag, NULL);
597 #endif
598 	sc->sc_pp->chip_map(sc, pa);
599 
600 	if (sc->sc_dma_ok) {
601 		csr = pci_conf_read(pc, tag, PCI_COMMAND_STATUS_REG);
602 		csr |= PCI_COMMAND_MASTER_ENABLE;
603 		pci_conf_write(pc, tag, PCI_COMMAND_STATUS_REG, csr);
604 	}
605 	WDCDEBUG_PRINT(("pciide: command/status register=%x\n",
606 	    pci_conf_read(pc, tag, PCI_COMMAND_STATUS_REG)), DEBUG_PROBE);
607 }
608 
609 /* tell whether the chip is enabled or not */
610 int
611 pciide_chipen(sc, pa)
612 	struct pciide_softc *sc;
613 	struct pci_attach_args *pa;
614 {
615 	pcireg_t csr;
616 	if ((pa->pa_flags & PCI_FLAGS_IO_ENABLED) == 0) {
617 		csr = pci_conf_read(sc->sc_pc, sc->sc_tag,
618 		    PCI_COMMAND_STATUS_REG);
619 		printf("%s: device disabled (at %s)\n",
620 	 	   sc->sc_wdcdev.sc_dev.dv_xname,
621 	  	  (csr & PCI_COMMAND_IO_ENABLE) == 0 ?
622 		  "device" : "bridge");
623 		return 0;
624 	}
625 	return 1;
626 }
627 
628 int
629 pciide_mapregs_compat(pa, cp, compatchan, cmdsizep, ctlsizep)
630 	struct pci_attach_args *pa;
631 	struct pciide_channel *cp;
632 	int compatchan;
633 	bus_size_t *cmdsizep, *ctlsizep;
634 {
635 	struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
636 	struct channel_softc *wdc_cp = &cp->wdc_channel;
637 
638 	cp->compat = 1;
639 	*cmdsizep = PCIIDE_COMPAT_CMD_SIZE;
640 	*ctlsizep = PCIIDE_COMPAT_CTL_SIZE;
641 
642 	wdc_cp->cmd_iot = pa->pa_iot;
643 	if (bus_space_map(wdc_cp->cmd_iot, PCIIDE_COMPAT_CMD_BASE(compatchan),
644 	    PCIIDE_COMPAT_CMD_SIZE, 0, &wdc_cp->cmd_ioh) != 0) {
645 		printf("%s: couldn't map %s channel cmd regs\n",
646 		    sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
647 		return (0);
648 	}
649 
650 	wdc_cp->ctl_iot = pa->pa_iot;
651 	if (bus_space_map(wdc_cp->ctl_iot, PCIIDE_COMPAT_CTL_BASE(compatchan),
652 	    PCIIDE_COMPAT_CTL_SIZE, 0, &wdc_cp->ctl_ioh) != 0) {
653 		printf("%s: couldn't map %s channel ctl regs\n",
654 		    sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
655 		bus_space_unmap(wdc_cp->cmd_iot, wdc_cp->cmd_ioh,
656 		    PCIIDE_COMPAT_CMD_SIZE);
657 		return (0);
658 	}
659 
660 	return (1);
661 }
662 
663 int
664 pciide_mapregs_native(pa, cp, cmdsizep, ctlsizep, pci_intr)
665 	struct pci_attach_args * pa;
666 	struct pciide_channel *cp;
667 	bus_size_t *cmdsizep, *ctlsizep;
668 	int (*pci_intr) __P((void *));
669 {
670 	struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
671 	struct channel_softc *wdc_cp = &cp->wdc_channel;
672 	const char *intrstr;
673 	pci_intr_handle_t intrhandle;
674 
675 	cp->compat = 0;
676 
677 	if (sc->sc_pci_ih == NULL) {
678 		if (pci_intr_map(pa, &intrhandle) != 0) {
679 			printf("%s: couldn't map native-PCI interrupt\n",
680 			    sc->sc_wdcdev.sc_dev.dv_xname);
681 			return 0;
682 		}
683 		intrstr = pci_intr_string(pa->pa_pc, intrhandle);
684 		sc->sc_pci_ih = pci_intr_establish(pa->pa_pc,
685 		    intrhandle, IPL_BIO, pci_intr, sc);
686 		if (sc->sc_pci_ih != NULL) {
687 			printf("%s: using %s for native-PCI interrupt\n",
688 			    sc->sc_wdcdev.sc_dev.dv_xname,
689 			    intrstr ? intrstr : "unknown interrupt");
690 		} else {
691 			printf("%s: couldn't establish native-PCI interrupt",
692 			    sc->sc_wdcdev.sc_dev.dv_xname);
693 			if (intrstr != NULL)
694 				printf(" at %s", intrstr);
695 			printf("\n");
696 			return 0;
697 		}
698 	}
699 	cp->ih = sc->sc_pci_ih;
700 	if (pci_mapreg_map(pa, PCIIDE_REG_CMD_BASE(wdc_cp->channel),
701 	    PCI_MAPREG_TYPE_IO, 0,
702 	    &wdc_cp->cmd_iot, &wdc_cp->cmd_ioh, NULL, cmdsizep) != 0) {
703 		printf("%s: couldn't map %s channel cmd regs\n",
704 		    sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
705 		return 0;
706 	}
707 
708 	if (pci_mapreg_map(pa, PCIIDE_REG_CTL_BASE(wdc_cp->channel),
709 	    PCI_MAPREG_TYPE_IO, 0,
710 	    &wdc_cp->ctl_iot, &cp->ctl_baseioh, NULL, ctlsizep) != 0) {
711 		printf("%s: couldn't map %s channel ctl regs\n",
712 		    sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
713 		bus_space_unmap(wdc_cp->cmd_iot, wdc_cp->cmd_ioh, *cmdsizep);
714 		return 0;
715 	}
716 	/*
717 	 * In native mode, 4 bytes of I/O space are mapped for the control
718 	 * register; the control register itself is at offset 2. Pass the
719 	 * generic code a handle for only one byte at the right offset.
720 	 */
721 	if (bus_space_subregion(wdc_cp->ctl_iot, cp->ctl_baseioh, 2, 1,
722 	    &wdc_cp->ctl_ioh) != 0) {
723 		printf("%s: unable to subregion %s channel ctl regs\n",
724 		    sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
725 		bus_space_unmap(wdc_cp->cmd_iot, wdc_cp->cmd_ioh, *cmdsizep);
726 		bus_space_unmap(wdc_cp->ctl_iot, cp->ctl_baseioh, *ctlsizep);
727 		return 0;
728 	}
729 	return (1);
730 }
731 
732 void
733 pciide_mapreg_dma(sc, pa)
734 	struct pciide_softc *sc;
735 	struct pci_attach_args *pa;
736 {
737 	pcireg_t maptype;
738 	bus_addr_t addr;
739 
740 	/*
741 	 * Map DMA registers
742 	 *
743 	 * Note that sc_dma_ok is the right variable to test to see if
744 	 * DMA can be done.  If the interface doesn't support DMA,
745 	 * sc_dma_ok will never be non-zero.  If the DMA regs couldn't
746 	 * be mapped, it'll be zero.  I.e., sc_dma_ok will only be
747 	 * non-zero if the interface supports DMA and the registers
748 	 * could be mapped.
749 	 *
750 	 * XXX Note that despite the fact that the Bus Master IDE specs
751 	 * XXX say that "The bus master IDE function uses 16 bytes of IO
752 	 * XXX space," some controllers (at least the United
753 	 * XXX Microelectronics UM8886BF) place it in memory space.
754 	 */
755 	maptype = pci_mapreg_type(pa->pa_pc, pa->pa_tag,
756 	    PCIIDE_REG_BUS_MASTER_DMA);
757 
758 	switch (maptype) {
759 	case PCI_MAPREG_TYPE_IO:
760 		sc->sc_dma_ok = (pci_mapreg_info(pa->pa_pc, pa->pa_tag,
761 		    PCIIDE_REG_BUS_MASTER_DMA, PCI_MAPREG_TYPE_IO,
762 		    &addr, NULL, NULL) == 0);
763 		if (sc->sc_dma_ok == 0) {
764 			printf(", but unused (couldn't query registers)");
765 			break;
766 		}
767 		if ((sc->sc_pp->ide_flags & IDE_16BIT_IOSPACE)
768 		    && addr >= 0x10000) {
769 			sc->sc_dma_ok = 0;
770 			printf(", but unused (registers at unsafe address %#lx)", (unsigned long)addr);
771 			break;
772 		}
773 		/* FALLTHROUGH */
774 
775 	case PCI_MAPREG_MEM_TYPE_32BIT:
776 		sc->sc_dma_ok = (pci_mapreg_map(pa,
777 		    PCIIDE_REG_BUS_MASTER_DMA, maptype, 0,
778 		    &sc->sc_dma_iot, &sc->sc_dma_ioh, NULL, NULL) == 0);
779 		sc->sc_dmat = pa->pa_dmat;
780 		if (sc->sc_dma_ok == 0) {
781 			printf(", but unused (couldn't map registers)");
782 		} else {
783 			sc->sc_wdcdev.dma_arg = sc;
784 			sc->sc_wdcdev.dma_init = pciide_dma_init;
785 			sc->sc_wdcdev.dma_start = pciide_dma_start;
786 			sc->sc_wdcdev.dma_finish = pciide_dma_finish;
787 		}
788 		break;
789 
790 	default:
791 		sc->sc_dma_ok = 0;
792 		printf(", but unsupported register maptype (0x%x)", maptype);
793 	}
794 }
795 
796 int
797 pciide_compat_intr(arg)
798 	void *arg;
799 {
800 	struct pciide_channel *cp = arg;
801 
802 #ifdef DIAGNOSTIC
803 	/* should only be called for a compat channel */
804 	if (cp->compat == 0)
805 		panic("pciide compat intr called for non-compat chan %p\n", cp);
806 #endif
807 	return (wdcintr(&cp->wdc_channel));
808 }
809 
810 int
811 pciide_pci_intr(arg)
812 	void *arg;
813 {
814 	struct pciide_softc *sc = arg;
815 	struct pciide_channel *cp;
816 	struct channel_softc *wdc_cp;
817 	int i, rv, crv;
818 
819 	rv = 0;
820 	for (i = 0; i < sc->sc_wdcdev.nchannels; i++) {
821 		cp = &sc->pciide_channels[i];
822 		wdc_cp = &cp->wdc_channel;
823 
824 		/* If it's a compat channel, skip it. */
825 		if (cp->compat)
826 			continue;
827 		/* if this channel isn't waiting for an intr, skip */
828 		if ((wdc_cp->ch_flags & WDCF_IRQ_WAIT) == 0)
829 			continue;
830 
831 		crv = wdcintr(wdc_cp);
832 		if (crv == 0)
833 			;		/* leave rv alone */
834 		else if (crv == 1)
835 			rv = 1;		/* claim the intr */
836 		else if (rv == 0)	/* crv should be -1 in this case */
837 			rv = crv;	/* if we've done no better, take it */
838 	}
839 	return (rv);
840 }
841 
842 void
843 pciide_channel_dma_setup(cp)
844 	struct pciide_channel *cp;
845 {
846 	int drive;
847 	struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
848 	struct ata_drive_datas *drvp;
849 
850 	for (drive = 0; drive < 2; drive++) {
851 		drvp = &cp->wdc_channel.ch_drive[drive];
852 		/* If no drive, skip */
853 		if ((drvp->drive_flags & DRIVE) == 0)
854 			continue;
855 		/* setup DMA if needed */
856 		if (((drvp->drive_flags & DRIVE_DMA) == 0 &&
857 		    (drvp->drive_flags & DRIVE_UDMA) == 0) ||
858 		    sc->sc_dma_ok == 0) {
859 			drvp->drive_flags &= ~(DRIVE_DMA | DRIVE_UDMA);
860 			continue;
861 		}
862 		if (pciide_dma_table_setup(sc, cp->wdc_channel.channel, drive)
863 		    != 0) {
864 			/* Abort DMA setup */
865 			drvp->drive_flags &= ~(DRIVE_DMA | DRIVE_UDMA);
866 			continue;
867 		}
868 	}
869 }
870 
871 int
872 pciide_dma_table_setup(sc, channel, drive)
873 	struct pciide_softc *sc;
874 	int channel, drive;
875 {
876 	bus_dma_segment_t seg;
877 	int error, rseg;
878 	const bus_size_t dma_table_size =
879 	    sizeof(struct idedma_table) * NIDEDMA_TABLES;
880 	struct pciide_dma_maps *dma_maps =
881 	    &sc->pciide_channels[channel].dma_maps[drive];
882 
883 	/* If table was already allocated, just return */
884 	if (dma_maps->dma_table)
885 		return 0;
886 
887 	/* Allocate memory for the DMA tables and map it */
888 	if ((error = bus_dmamem_alloc(sc->sc_dmat, dma_table_size,
889 	    IDEDMA_TBL_ALIGN, IDEDMA_TBL_ALIGN, &seg, 1, &rseg,
890 	    BUS_DMA_NOWAIT)) != 0) {
891 		printf("%s:%d: unable to allocate table DMA for "
892 		    "drive %d, error=%d\n", sc->sc_wdcdev.sc_dev.dv_xname,
893 		    channel, drive, error);
894 		return error;
895 	}
896 	if ((error = bus_dmamem_map(sc->sc_dmat, &seg, rseg,
897 	    dma_table_size,
898 	    (caddr_t *)&dma_maps->dma_table,
899 	    BUS_DMA_NOWAIT|BUS_DMA_COHERENT)) != 0) {
900 		printf("%s:%d: unable to map table DMA for "
901 		    "drive %d, error=%d\n", sc->sc_wdcdev.sc_dev.dv_xname,
902 		    channel, drive, error);
903 		return error;
904 	}
905 	WDCDEBUG_PRINT(("pciide_dma_table_setup: table at %p len %lu, "
906 	    "phy 0x%lx\n", dma_maps->dma_table, (u_long)dma_table_size,
907 	    (unsigned long)seg.ds_addr), DEBUG_PROBE);
908 
909 	/* Create and load table DMA map for this disk */
910 	if ((error = bus_dmamap_create(sc->sc_dmat, dma_table_size,
911 	    1, dma_table_size, IDEDMA_TBL_ALIGN, BUS_DMA_NOWAIT,
912 	    &dma_maps->dmamap_table)) != 0) {
913 		printf("%s:%d: unable to create table DMA map for "
914 		    "drive %d, error=%d\n", sc->sc_wdcdev.sc_dev.dv_xname,
915 		    channel, drive, error);
916 		return error;
917 	}
918 	if ((error = bus_dmamap_load(sc->sc_dmat,
919 	    dma_maps->dmamap_table,
920 	    dma_maps->dma_table,
921 	    dma_table_size, NULL, BUS_DMA_NOWAIT)) != 0) {
922 		printf("%s:%d: unable to load table DMA map for "
923 		    "drive %d, error=%d\n", sc->sc_wdcdev.sc_dev.dv_xname,
924 		    channel, drive, error);
925 		return error;
926 	}
927 	WDCDEBUG_PRINT(("pciide_dma_table_setup: phy addr of table 0x%lx\n",
928 	    (unsigned long)dma_maps->dmamap_table->dm_segs[0].ds_addr),
929 	    DEBUG_PROBE);
930 	/* Create an xfer DMA map for this drive */
931 	if ((error = bus_dmamap_create(sc->sc_dmat, IDEDMA_BYTE_COUNT_MAX,
932 	    NIDEDMA_TABLES, IDEDMA_BYTE_COUNT_MAX, IDEDMA_BYTE_COUNT_ALIGN,
933 	    BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW,
934 	    &dma_maps->dmamap_xfer)) != 0) {
935 		printf("%s:%d: unable to create xfer DMA map for "
936 		    "drive %d, error=%d\n", sc->sc_wdcdev.sc_dev.dv_xname,
937 		    channel, drive, error);
938 		return error;
939 	}
940 	return 0;
941 }
942 
943 int
944 pciide_dma_init(v, channel, drive, databuf, datalen, flags)
945 	void *v;
946 	int channel, drive;
947 	void *databuf;
948 	size_t datalen;
949 	int flags;
950 {
951 	struct pciide_softc *sc = v;
952 	int error, seg;
953 	struct pciide_dma_maps *dma_maps =
954 	    &sc->pciide_channels[channel].dma_maps[drive];
955 
956 	error = bus_dmamap_load(sc->sc_dmat,
957 	    dma_maps->dmamap_xfer,
958 	    databuf, datalen, NULL, BUS_DMA_NOWAIT | BUS_DMA_STREAMING);
959 	if (error) {
960 		printf("%s:%d: unable to load xfer DMA map for "
961 		    "drive %d, error=%d\n", sc->sc_wdcdev.sc_dev.dv_xname,
962 		    channel, drive, error);
963 		return error;
964 	}
965 
966 	bus_dmamap_sync(sc->sc_dmat, dma_maps->dmamap_xfer, 0,
967 	    dma_maps->dmamap_xfer->dm_mapsize,
968 	    (flags & WDC_DMA_READ) ?
969 	    BUS_DMASYNC_PREREAD : BUS_DMASYNC_PREWRITE);
970 
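	/*
	 * Build one bus-master IDE physical region descriptor (PRD) per
	 * DMA segment: a 32-bit base address plus a byte count.
	 */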
971 	for (seg = 0; seg < dma_maps->dmamap_xfer->dm_nsegs; seg++) {
972 #ifdef DIAGNOSTIC
973 		/* A segment must not cross a 64k boundary */
974 		{
975 		u_long phys = dma_maps->dmamap_xfer->dm_segs[seg].ds_addr;
976 		u_long len = dma_maps->dmamap_xfer->dm_segs[seg].ds_len;
977 		if ((phys & ~IDEDMA_BYTE_COUNT_MASK) !=
978 		    ((phys + len - 1) & ~IDEDMA_BYTE_COUNT_MASK)) {
979 			printf("pciide_dma: segment %d physical addr 0x%lx"
980 			    " len 0x%lx not properly aligned\n",
981 			    seg, phys, len);
982 			panic("pciide_dma: buf align");
983 		}
984 		}
985 #endif
986 		dma_maps->dma_table[seg].base_addr =
987 		    htole32(dma_maps->dmamap_xfer->dm_segs[seg].ds_addr);
988 		dma_maps->dma_table[seg].byte_count =
989 		    htole32(dma_maps->dmamap_xfer->dm_segs[seg].ds_len &
990 		    IDEDMA_BYTE_COUNT_MASK);
991 		WDCDEBUG_PRINT(("\t seg %d len %d addr 0x%x\n",
992 		   seg, le32toh(dma_maps->dma_table[seg].byte_count),
993 		   le32toh(dma_maps->dma_table[seg].base_addr)), DEBUG_DMA);
994 
995 	}
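	/* Flag the last descriptor as end-of-table so the engine stops there. */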
996 	dma_maps->dma_table[dma_maps->dmamap_xfer->dm_nsegs -1].byte_count |=
997 	    htole32(IDEDMA_BYTE_COUNT_EOT);
998 
999 	bus_dmamap_sync(sc->sc_dmat, dma_maps->dmamap_table, 0,
1000 	    dma_maps->dmamap_table->dm_mapsize,
1001 	    BUS_DMASYNC_PREWRITE);
1002 
1003 	/* Maps are ready. Start DMA function */
1004 #ifdef DIAGNOSTIC
1005 	if (dma_maps->dmamap_table->dm_segs[0].ds_addr & ~IDEDMA_TBL_MASK) {
1006 		printf("pciide_dma_init: addr 0x%lx not properly aligned\n",
1007 		    (u_long)dma_maps->dmamap_table->dm_segs[0].ds_addr);
1008 		panic("pciide_dma_init: table align");
1009 	}
1010 #endif
1011 
1012 	/* Clear status bits */
1013 	bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
1014 	    IDEDMA_CTL + IDEDMA_SCH_OFFSET * channel,
1015 	    bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh,
1016 		IDEDMA_CTL + IDEDMA_SCH_OFFSET * channel));
1017 	/* Write table addr */
1018 	bus_space_write_4(sc->sc_dma_iot, sc->sc_dma_ioh,
1019 	    IDEDMA_TBL + IDEDMA_SCH_OFFSET * channel,
1020 	    dma_maps->dmamap_table->dm_segs[0].ds_addr);
1021 	/* set read/write */
1022 	bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
1023 	    IDEDMA_CMD + IDEDMA_SCH_OFFSET * channel,
1024 	    (flags & WDC_DMA_READ) ? IDEDMA_CMD_WRITE: 0);
1025 	/* remember flags */
1026 	dma_maps->dma_flags = flags;
1027 	return 0;
1028 }
1029 
1030 void
1031 pciide_dma_start(v, channel, drive)
1032 	void *v;
1033 	int channel, drive;
1034 {
1035 	struct pciide_softc *sc = v;
1036 
1037 	WDCDEBUG_PRINT(("pciide_dma_start\n"),DEBUG_XFERS);
1038 	bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
1039 	    IDEDMA_CMD + IDEDMA_SCH_OFFSET * channel,
1040 	    bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh,
1041 		IDEDMA_CMD + IDEDMA_SCH_OFFSET * channel) | IDEDMA_CMD_START);
1042 }
1043 
1044 int
1045 pciide_dma_finish(v, channel, drive, force)
1046 	void *v;
1047 	int channel, drive;
1048 	int force;
1049 {
1050 	struct pciide_softc *sc = v;
1051 	u_int8_t status;
1052 	int error = 0;
1053 	struct pciide_dma_maps *dma_maps =
1054 	    &sc->pciide_channels[channel].dma_maps[drive];
1055 
1056 	status = bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh,
1057 	    IDEDMA_CTL + IDEDMA_SCH_OFFSET * channel);
1058 	WDCDEBUG_PRINT(("pciide_dma_finish: status 0x%x\n", status),
1059 	    DEBUG_XFERS);
1060 
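	/*
	 * The bus-master status register hasn't latched an interrupt for
	 * this channel; unless we're forcing termination, report that no
	 * DMA interrupt is pending.
	 */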
1061 	if (force == 0 && (status & IDEDMA_CTL_INTR) == 0)
1062 		return WDC_DMAST_NOIRQ;
1063 
1064 	/* stop DMA channel */
1065 	bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
1066 	    IDEDMA_CMD + IDEDMA_SCH_OFFSET * channel,
1067 	    bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh,
1068 		IDEDMA_CMD + IDEDMA_SCH_OFFSET * channel) & ~IDEDMA_CMD_START);
1069 
1070 	/* Unload the map of the data buffer */
1071 	bus_dmamap_sync(sc->sc_dmat, dma_maps->dmamap_xfer, 0,
1072 	    dma_maps->dmamap_xfer->dm_mapsize,
1073 	    (dma_maps->dma_flags & WDC_DMA_READ) ?
1074 	    BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE);
1075 	bus_dmamap_unload(sc->sc_dmat, dma_maps->dmamap_xfer);
1076 
1077 	if ((status & IDEDMA_CTL_ERR) != 0) {
1078 		printf("%s:%d:%d: bus-master DMA error: status=0x%x\n",
1079 		    sc->sc_wdcdev.sc_dev.dv_xname, channel, drive, status);
1080 		error |= WDC_DMAST_ERR;
1081 	}
1082 
1083 	if ((status & IDEDMA_CTL_INTR) == 0) {
1084 		printf("%s:%d:%d: bus-master DMA error: missing interrupt, "
1085 		    "status=0x%x\n", sc->sc_wdcdev.sc_dev.dv_xname, channel,
1086 		    drive, status);
1087 		error |= WDC_DMAST_NOIRQ;
1088 	}
1089 
1090 	if ((status & IDEDMA_CTL_ACT) != 0) {
1091 		/* data underrun, may be a valid condition for ATAPI */
1092 		error |= WDC_DMAST_UNDER;
1093 	}
1094 	return error;
1095 }
1096 
1097 void
1098 pciide_irqack(chp)
1099 	struct channel_softc *chp;
1100 {
1101 	struct pciide_channel *cp = (struct pciide_channel*)chp;
1102 	struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
1103 
1104 	/* clear status bits in IDE DMA registers */
1105 	bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
1106 	    IDEDMA_CTL + IDEDMA_SCH_OFFSET * chp->channel,
1107 	    bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh,
1108 		IDEDMA_CTL + IDEDMA_SCH_OFFSET * chp->channel));
1109 }
1110 
1111 /* some common code used by several chip_map */
1112 int
1113 pciide_chansetup(sc, channel, interface)
1114 	struct pciide_softc *sc;
1115 	int channel;
1116 	pcireg_t interface;
1117 {
1118 	struct pciide_channel *cp = &sc->pciide_channels[channel];
1119 	sc->wdc_chanarray[channel] = &cp->wdc_channel;
1120 	cp->name = PCIIDE_CHANNEL_NAME(channel);
1121 	cp->wdc_channel.channel = channel;
1122 	cp->wdc_channel.wdc = &sc->sc_wdcdev;
1123 	cp->wdc_channel.ch_queue =
1124 	    malloc(sizeof(struct channel_queue), M_DEVBUF, M_NOWAIT);
1125 	if (cp->wdc_channel.ch_queue == NULL) {
1126 		printf("%s: %s channel: "
1127 		    "can't allocate memory for command queue\n",
1128 		    sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
1129 		return 0;
1130 	}
1131 	printf("%s: %s channel %s to %s mode\n",
1132 	    sc->sc_wdcdev.sc_dev.dv_xname, cp->name,
1133 	    (interface & PCIIDE_INTERFACE_SETTABLE(channel)) ?
1134 	    "configured" : "wired",
1135 	    (interface & PCIIDE_INTERFACE_PCI(channel)) ?
1136 	    "native-PCI" : "compatibility");
1137 	return 1;
1138 }
1139 
1140 /* some common code used by several chip channel_map */
1141 void
1142 pciide_mapchan(pa, cp, interface, cmdsizep, ctlsizep, pci_intr)
1143 	struct pci_attach_args *pa;
1144 	struct pciide_channel *cp;
1145 	pcireg_t interface;
1146 	bus_size_t *cmdsizep, *ctlsizep;
1147 	int (*pci_intr) __P((void *));
1148 {
1149 	struct channel_softc *wdc_cp = &cp->wdc_channel;
1150 
1151 	if (interface & PCIIDE_INTERFACE_PCI(wdc_cp->channel))
1152 		cp->hw_ok = pciide_mapregs_native(pa, cp, cmdsizep, ctlsizep,
1153 		    pci_intr);
1154 	else
1155 		cp->hw_ok = pciide_mapregs_compat(pa, cp,
1156 		    wdc_cp->channel, cmdsizep, ctlsizep);
1157 
1158 	if (cp->hw_ok == 0)
1159 		return;
1160 	wdc_cp->data32iot = wdc_cp->cmd_iot;
1161 	wdc_cp->data32ioh = wdc_cp->cmd_ioh;
1162 	wdcattach(wdc_cp);
1163 }
1164 
1165 /*
1166  * Generic code to determine whether a channel can be disabled. Returns 1
1167  * if the channel can be disabled, 0 if not.
1168  */
1169 int
1170 pciide_chan_candisable(cp)
1171 	struct pciide_channel *cp;
1172 {
1173 	struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
1174 	struct channel_softc *wdc_cp = &cp->wdc_channel;
1175 
1176 	if ((wdc_cp->ch_drive[0].drive_flags & DRIVE) == 0 &&
1177 	    (wdc_cp->ch_drive[1].drive_flags & DRIVE) == 0) {
1178 		printf("%s: disabling %s channel (no drives)\n",
1179 		    sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
1180 		cp->hw_ok = 0;
1181 		return 1;
1182 	}
1183 	return 0;
1184 }
1185 
1186 /*
1187  * Generic code to map the compat intr if hw_ok=1 and this is a compat
1188  * channel. Sets hw_ok=0 on failure.
1189  */
1190 void
1191 pciide_map_compat_intr(pa, cp, compatchan, interface)
1192 	struct pci_attach_args *pa;
1193 	struct pciide_channel *cp;
1194 	int compatchan, interface;
1195 {
1196 	struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
1197 	struct channel_softc *wdc_cp = &cp->wdc_channel;
1198 
1199 	if (cp->hw_ok == 0)
1200 		return;
1201 	if ((interface & PCIIDE_INTERFACE_PCI(wdc_cp->channel)) != 0)
1202 		return;
1203 
1204 	cp->ih = pciide_machdep_compat_intr_establish(&sc->sc_wdcdev.sc_dev,
1205 	    pa, compatchan, pciide_compat_intr, cp);
1206 	if (cp->ih == NULL) {
1207 		printf("%s: no compatibility interrupt for use by %s "
1208 		    "channel\n", sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
1209 		cp->hw_ok = 0;
1210 	}
1211 }
1212 
1213 void
1214 pciide_print_modes(cp)
1215 	struct pciide_channel *cp;
1216 {
1217 	wdc_print_modes(&cp->wdc_channel);
1218 }
1219 
1220 void
1221 default_chip_map(sc, pa)
1222 	struct pciide_softc *sc;
1223 	struct pci_attach_args *pa;
1224 {
1225 	struct pciide_channel *cp;
1226 	pcireg_t interface = PCI_INTERFACE(pa->pa_class);
1227 	pcireg_t csr;
1228 	int channel, drive;
1229 	struct ata_drive_datas *drvp;
1230 	u_int8_t idedma_ctl;
1231 	bus_size_t cmdsize, ctlsize;
1232 	char *failreason;
1233 
1234 	if (pciide_chipen(sc, pa) == 0)
1235 		return;
1236 
1237 	if (interface & PCIIDE_INTERFACE_BUS_MASTER_DMA) {
1238 		printf("%s: bus-master DMA support present",
1239 		    sc->sc_wdcdev.sc_dev.dv_xname);
1240 		if (sc->sc_pp == &default_product_desc &&
1241 		    (sc->sc_wdcdev.sc_dev.dv_cfdata->cf_flags &
1242 		    PCIIDE_OPTIONS_DMA) == 0) {
1243 			printf(", but unused (no driver support)");
1244 			sc->sc_dma_ok = 0;
1245 		} else {
1246 			pciide_mapreg_dma(sc, pa);
1247 			if (sc->sc_dma_ok != 0)
1248 				printf(", used without full driver "
1249 				    "support");
1250 		}
1251 	} else {
1252 		printf("%s: hardware does not support DMA",
1253 		    sc->sc_wdcdev.sc_dev.dv_xname);
1254 		sc->sc_dma_ok = 0;
1255 	}
1256 	printf("\n");
1257 	if (sc->sc_dma_ok) {
1258 		sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_IRQACK;
1259 		sc->sc_wdcdev.irqack = pciide_irqack;
1260 	}
1261 	sc->sc_wdcdev.PIO_cap = 0;
1262 	sc->sc_wdcdev.DMA_cap = 0;
1263 
1264 	sc->sc_wdcdev.channels = sc->wdc_chanarray;
1265 	sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
1266 	sc->sc_wdcdev.cap |= WDC_CAPABILITY_DATA16;
1267 
1268 	for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
1269 		cp = &sc->pciide_channels[channel];
1270 		if (pciide_chansetup(sc, channel, interface) == 0)
1271 			continue;
1272 		if (interface & PCIIDE_INTERFACE_PCI(channel)) {
1273 			cp->hw_ok = pciide_mapregs_native(pa, cp, &cmdsize,
1274 			    &ctlsize, pciide_pci_intr);
1275 		} else {
1276 			cp->hw_ok = pciide_mapregs_compat(pa, cp,
1277 			    channel, &cmdsize, &ctlsize);
1278 		}
1279 		if (cp->hw_ok == 0)
1280 			continue;
1281 		/*
1282 		 * Check to see if something appears to be there.
1283 		 */
1284 		failreason = NULL;
1285 		if (!wdcprobe(&cp->wdc_channel)) {
1286 			failreason = "not responding; disabled or no drives?";
1287 			goto next;
1288 		}
1289 		/*
1290 		 * Now, make sure it's actually attributable to this PCI IDE
1291 		 * channel by trying to access the channel again while the
1292 		 * PCI IDE controller's I/O space is disabled.  (If the
1293 		 * channel no longer appears to be there, it belongs to
1294 		 * this controller.)  YUCK!
1295 		 */
1296 		csr = pci_conf_read(sc->sc_pc, sc->sc_tag,
1297 		    PCI_COMMAND_STATUS_REG);
1298 		pci_conf_write(sc->sc_pc, sc->sc_tag, PCI_COMMAND_STATUS_REG,
1299 		    csr & ~PCI_COMMAND_IO_ENABLE);
1300 		if (wdcprobe(&cp->wdc_channel))
1301 			failreason = "other hardware responding at addresses";
1302 		pci_conf_write(sc->sc_pc, sc->sc_tag,
1303 		    PCI_COMMAND_STATUS_REG, csr);
1304 next:
1305 		if (failreason) {
1306 			printf("%s: %s channel ignored (%s)\n",
1307 			    sc->sc_wdcdev.sc_dev.dv_xname, cp->name,
1308 			    failreason);
1309 			cp->hw_ok = 0;
1310 			bus_space_unmap(cp->wdc_channel.cmd_iot,
1311 			    cp->wdc_channel.cmd_ioh, cmdsize);
1312 			bus_space_unmap(cp->wdc_channel.ctl_iot,
1313 			    cp->wdc_channel.ctl_ioh, ctlsize);
1314 		} else {
1315 			pciide_map_compat_intr(pa, cp, channel, interface);
1316 		}
1317 		if (cp->hw_ok) {
1318 			cp->wdc_channel.data32iot = cp->wdc_channel.cmd_iot;
1319 			cp->wdc_channel.data32ioh = cp->wdc_channel.cmd_ioh;
1320 			wdcattach(&cp->wdc_channel);
1321 		}
1322 	}
1323 
1324 	if (sc->sc_dma_ok == 0)
1325 		return;
1326 
1327 	/* Allocate DMA maps */
1328 	for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
1329 		idedma_ctl = 0;
1330 		cp = &sc->pciide_channels[channel];
1331 		for (drive = 0; drive < 2; drive++) {
1332 			drvp = &cp->wdc_channel.ch_drive[drive];
1333 			/* If no drive, skip */
1334 			if ((drvp->drive_flags & DRIVE) == 0)
1335 				continue;
1336 			if ((drvp->drive_flags & DRIVE_DMA) == 0)
1337 				continue;
1338 			if (pciide_dma_table_setup(sc, channel, drive) != 0) {
1339 				/* Abort DMA setup */
1340 				printf("%s:%d:%d: can't allocate DMA maps, "
1341 				    "using PIO transfers\n",
1342 				    sc->sc_wdcdev.sc_dev.dv_xname,
1343 				    channel, drive);
1344 				drvp->drive_flags &= ~DRIVE_DMA;
1345 			}
1346 			printf("%s:%d:%d: using DMA data transfers\n",
1347 			    sc->sc_wdcdev.sc_dev.dv_xname,
1348 			    channel, drive);
1349 			idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
1350 		}
1351 		if (idedma_ctl != 0) {
1352 			/* Add software bits in status register */
1353 			bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
1354 			    IDEDMA_CTL + (IDEDMA_SCH_OFFSET * channel),
1355 			    idedma_ctl);
1356 		}
1357 	}
1358 }
1359 
1360 void
1361 piix_chip_map(sc, pa)
1362 	struct pciide_softc *sc;
1363 	struct pci_attach_args *pa;
1364 {
1365 	struct pciide_channel *cp;
1366 	int channel;
1367 	u_int32_t idetim;
1368 	bus_size_t cmdsize, ctlsize;
1369 
1370 	if (pciide_chipen(sc, pa) == 0)
1371 		return;
1372 
1373 	printf("%s: bus-master DMA support present",
1374 	    sc->sc_wdcdev.sc_dev.dv_xname);
1375 	pciide_mapreg_dma(sc, pa);
1376 	printf("\n");
1377 	sc->sc_wdcdev.cap |= WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
1378 	    WDC_CAPABILITY_MODE;
1379 	if (sc->sc_dma_ok) {
1380 		sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_IRQACK;
1381 		sc->sc_wdcdev.irqack = pciide_irqack;
1382 		switch(sc->sc_pp->ide_product) {
1383 		case PCI_PRODUCT_INTEL_82371AB_IDE:
1384 		case PCI_PRODUCT_INTEL_82440MX_IDE:
1385 		case PCI_PRODUCT_INTEL_82801AA_IDE:
1386 		case PCI_PRODUCT_INTEL_82801AB_IDE:
1387 		case PCI_PRODUCT_INTEL_82801BA_IDE:
1388 		case PCI_PRODUCT_INTEL_82801BAM_IDE:
1389 			sc->sc_wdcdev.cap |= WDC_CAPABILITY_UDMA;
1390 		}
1391 	}
1392 	sc->sc_wdcdev.PIO_cap = 4;
1393 	sc->sc_wdcdev.DMA_cap = 2;
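	/*
	 * Maximum Ultra-DMA mode depends on the chip: the 82801AA (ICH)
	 * does UDMA/66 (mode 4), the 82801BA/BAM (ICH2) UDMA/100 (mode 5),
	 * the others UDMA/33 (mode 2).
	 */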
1394 	switch(sc->sc_pp->ide_product) {
1395 	case PCI_PRODUCT_INTEL_82801AA_IDE:
1396 		sc->sc_wdcdev.UDMA_cap = 4;
1397 		break;
1398 	case PCI_PRODUCT_INTEL_82801BA_IDE:
1399 	case PCI_PRODUCT_INTEL_82801BAM_IDE:
1400 		sc->sc_wdcdev.UDMA_cap = 5;
1401 		break;
1402 	default:
1403 		sc->sc_wdcdev.UDMA_cap = 2;
1404 	}
1405 	if (sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82371FB_IDE)
1406 		sc->sc_wdcdev.set_modes = piix_setup_channel;
1407 	else
1408 		sc->sc_wdcdev.set_modes = piix3_4_setup_channel;
1409 	sc->sc_wdcdev.channels = sc->wdc_chanarray;
1410 	sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
1411 
1412 	WDCDEBUG_PRINT(("piix_setup_chip: old idetim=0x%x",
1413 	    pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_IDETIM)),
1414 	    DEBUG_PROBE);
1415 	if (sc->sc_pp->ide_product != PCI_PRODUCT_INTEL_82371FB_IDE) {
1416 		WDCDEBUG_PRINT((", sidetim=0x%x",
1417 		    pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_SIDETIM)),
1418 		    DEBUG_PROBE);
1419 		if (sc->sc_wdcdev.cap & WDC_CAPABILITY_UDMA) {
1420 			WDCDEBUG_PRINT((", udmareg 0x%x",
1421 			    pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_UDMAREG)),
1422 			    DEBUG_PROBE);
1423 		}
1424 		if (sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801AA_IDE ||
1425 		    sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801AB_IDE ||
1426 		    sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801BA_IDE ||
1427 		    sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801BAM_IDE) {
1428 			WDCDEBUG_PRINT((", IDE_CONTROL 0x%x",
1429 			    pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_CONFIG)),
1430 			    DEBUG_PROBE);
1431 		}
1432 
1433 	}
1434 	WDCDEBUG_PRINT(("\n"), DEBUG_PROBE);
1435 
1436 	for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
1437 		cp = &sc->pciide_channels[channel];
1438 		/* PIIX is compat-only */
1439 		if (pciide_chansetup(sc, channel, 0) == 0)
1440 			continue;
1441 		idetim = pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_IDETIM);
1442 		if ((PIIX_IDETIM_READ(idetim, channel) &
1443 		    PIIX_IDETIM_IDE) == 0) {
1444 			printf("%s: %s channel ignored (disabled)\n",
1445 			    sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
1446 			continue;
1447 		}
1448 		/* PIIXes are compat-only pciide devices */
1449 		pciide_mapchan(pa, cp, 0, &cmdsize, &ctlsize, pciide_pci_intr);
1450 		if (cp->hw_ok == 0)
1451 			continue;
1452 		if (pciide_chan_candisable(cp)) {
1453 			idetim = PIIX_IDETIM_CLEAR(idetim, PIIX_IDETIM_IDE,
1454 			    channel);
1455 			pci_conf_write(sc->sc_pc, sc->sc_tag, PIIX_IDETIM,
1456 			    idetim);
1457 		}
1458 		pciide_map_compat_intr(pa, cp, channel, 0);
1459 		if (cp->hw_ok == 0)
1460 			continue;
1461 		sc->sc_wdcdev.set_modes(&cp->wdc_channel);
1462 	}
1463 
1464 	WDCDEBUG_PRINT(("piix_setup_chip: idetim=0x%x",
1465 	    pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_IDETIM)),
1466 	    DEBUG_PROBE);
1467 	if (sc->sc_pp->ide_product != PCI_PRODUCT_INTEL_82371FB_IDE) {
1468 		WDCDEBUG_PRINT((", sidetim=0x%x",
1469 		    pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_SIDETIM)),
1470 		    DEBUG_PROBE);
1471 		if (sc->sc_wdcdev.cap & WDC_CAPABILITY_UDMA) {
1472 			WDCDEBUG_PRINT((", udmareg 0x%x",
1473 			    pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_UDMAREG)),
1474 			    DEBUG_PROBE);
1475 		}
1476 		if (sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801AA_IDE ||
1477 		    sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801AB_IDE ||
1478 		    sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801BA_IDE ||
1479 		    sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801BAM_IDE) {
1480 			WDCDEBUG_PRINT((", IDE_CONTROL 0x%x",
1481 			    pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_CONFIG)),
1482 			    DEBUG_PROBE);
1483 		}
1484 	}
1485 	WDCDEBUG_PRINT(("\n"), DEBUG_PROBE);
1486 }
1487 
1488 void
1489 piix_setup_channel(chp)
1490 	struct channel_softc *chp;
1491 {
1492 	u_int8_t mode[2], drive;
1493 	u_int32_t oidetim, idetim, idedma_ctl;
1494 	struct pciide_channel *cp = (struct pciide_channel*)chp;
1495 	struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
1496 	struct ata_drive_datas *drvp = cp->wdc_channel.ch_drive;
1497 
1498 	oidetim = pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_IDETIM);
1499 	idetim = PIIX_IDETIM_CLEAR(oidetim, 0xffff, chp->channel);
1500 	idedma_ctl = 0;
1501 
1502 	/* set up new idetim: Enable IDE registers decode */
1503 	idetim = PIIX_IDETIM_SET(idetim, PIIX_IDETIM_IDE,
1504 	    chp->channel);
1505 
1506 	/* setup DMA */
1507 	pciide_channel_dma_setup(cp);
1508 
1509 	/*
1510 	 * Here we have to mess with the drives' modes: the PIIX can't have
1511 	 * different timings for master and slave drives.
1512 	 * We need to find the best combination.
1513 	 */
1514 
1515 	/* If both drives support DMA, take the lower mode */
1516 	if ((drvp[0].drive_flags & DRIVE_DMA) &&
1517 	    (drvp[1].drive_flags & DRIVE_DMA)) {
1518 		mode[0] = mode[1] =
1519 		    min(drvp[0].DMA_mode, drvp[1].DMA_mode);
1520 		drvp[0].DMA_mode = mode[0];
1521 		drvp[1].DMA_mode = mode[1];
1522 		goto ok;
1523 	}
1524 	/*
1525 	 * If only one drive supports DMA, use its mode, and
1526 	 * put the other one in PIO mode 0 if its mode is not compatible
1527 	 */
1528 	if (drvp[0].drive_flags & DRIVE_DMA) {
1529 		mode[0] = drvp[0].DMA_mode;
1530 		mode[1] = drvp[1].PIO_mode;
1531 		if (piix_isp_pio[mode[1]] != piix_isp_dma[mode[0]] ||
1532 		    piix_rtc_pio[mode[1]] != piix_rtc_dma[mode[0]])
1533 			mode[1] = drvp[1].PIO_mode = 0;
1534 		goto ok;
1535 	}
1536 	if (drvp[1].drive_flags & DRIVE_DMA) {
1537 		mode[1] = drvp[1].DMA_mode;
1538 		mode[0] = drvp[0].PIO_mode;
1539 		if (piix_isp_pio[mode[0]] != piix_isp_dma[mode[1]] ||
1540 		    piix_rtc_pio[mode[0]] != piix_rtc_dma[mode[1]])
1541 			mode[0] = drvp[0].PIO_mode = 0;
1542 		goto ok;
1543 	}
1544 	/*
1545 	 * If neither drive uses DMA, take the lower mode, unless
1546 	 * one of them is below PIO mode 2
1547 	 */
1548 	if (drvp[0].PIO_mode < 2) {
1549 		mode[0] = drvp[0].PIO_mode = 0;
1550 		mode[1] = drvp[1].PIO_mode;
1551 	} else if (drvp[1].PIO_mode < 2) {
1552 		mode[1] = drvp[1].PIO_mode = 0;
1553 		mode[0] = drvp[0].PIO_mode;
1554 	} else {
1555 		mode[0] = mode[1] =
1556 		    min(drvp[1].PIO_mode, drvp[0].PIO_mode);
1557 		drvp[0].PIO_mode = mode[0];
1558 		drvp[1].PIO_mode = mode[1];
1559 	}
1560 ok:	/* The modes are setup */
1561 	for (drive = 0; drive < 2; drive++) {
1562 		if (drvp[drive].drive_flags & DRIVE_DMA) {
1563 			idetim |= piix_setup_idetim_timings(
1564 			    mode[drive], 1, chp->channel);
1565 			goto end;
1566 		}
1567 	}
1568 	/* If we get here, neither drive uses DMA */
1569 	if (mode[0] >= 2)
1570 		idetim |= piix_setup_idetim_timings(
1571 		    mode[0], 0, chp->channel);
1572 	else
1573 		idetim |= piix_setup_idetim_timings(
1574 		    mode[1], 0, chp->channel);
1575 end:	/*
1576 	 * The timing mode is now set up in the controller. Enable
1577 	 * it per drive.
1578 	 */
1579 	for (drive = 0; drive < 2; drive++) {
1580 		/* If no drive, skip */
1581 		if ((drvp[drive].drive_flags & DRIVE) == 0)
1582 			continue;
1583 		idetim |= piix_setup_idetim_drvs(&drvp[drive]);
1584 		if (drvp[drive].drive_flags & DRIVE_DMA)
1585 			idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
1586 	}
1587 	if (idedma_ctl != 0) {
1588 		/* Add software bits in status register */
1589 		bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
1590 		    IDEDMA_CTL + (IDEDMA_SCH_OFFSET * chp->channel),
1591 		    idedma_ctl);
1592 	}
1593 	pci_conf_write(sc->sc_pc, sc->sc_tag, PIIX_IDETIM, idetim);
1594 	pciide_print_modes(cp);
1595 }
1596 
1597 void
1598 piix3_4_setup_channel(chp)
1599 	struct channel_softc *chp;
1600 {
1601 	struct ata_drive_datas *drvp;
1602 	u_int32_t oidetim, idetim, sidetim, udmareg, ideconf, idedma_ctl;
1603 	struct pciide_channel *cp = (struct pciide_channel*)chp;
1604 	struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
1605 	int drive;
1606 	int channel = chp->channel;
1607 
1608 	oidetim = pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_IDETIM);
1609 	sidetim = pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_SIDETIM);
1610 	udmareg = pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_UDMAREG);
1611 	ideconf = pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_CONFIG);
1612 	idetim = PIIX_IDETIM_CLEAR(oidetim, 0xffff, channel);
1613 	sidetim &= ~(PIIX_SIDETIM_ISP_MASK(channel) |
1614 	    PIIX_SIDETIM_RTC_MASK(channel));
1615 
1616 	idedma_ctl = 0;
1617 	/* If channel disabled, no need to go further */
1618 	if ((PIIX_IDETIM_READ(oidetim, channel) & PIIX_IDETIM_IDE) == 0)
1619 		return;
1620 	/* set up new idetim: Enable IDE registers decode */
1621 	idetim = PIIX_IDETIM_SET(idetim, PIIX_IDETIM_IDE, channel);
1622 
1623 	/* setup DMA if needed */
1624 	pciide_channel_dma_setup(cp);
1625 
1626 	for (drive = 0; drive < 2; drive++) {
1627 		udmareg &= ~(PIIX_UDMACTL_DRV_EN(channel, drive) |
1628 		    PIIX_UDMATIM_SET(0x3, channel, drive));
1629 		drvp = &chp->ch_drive[drive];
1630 		/* If no drive, skip */
1631 		if ((drvp->drive_flags & DRIVE) == 0)
1632 			continue;
1633 		if (((drvp->drive_flags & DRIVE_DMA) == 0 &&
1634 		    (drvp->drive_flags & DRIVE_UDMA) == 0))
1635 			goto pio;
1636 
1637 		if (sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801AA_IDE ||
1638 		    sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801AB_IDE ||
1639 		    sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801BA_IDE ||
1640 		    sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801BAM_IDE) {
1641 			ideconf |= PIIX_CONFIG_PINGPONG;
1642 		}
1643 		if (sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801BA_IDE ||
1644 		    sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801BAM_IDE) {
1645 			/* setup Ultra/100 */
1646 			if (drvp->UDMA_mode > 2 &&
1647 			    (ideconf & PIIX_CONFIG_CR(channel, drive)) == 0)
1648 				drvp->UDMA_mode = 2;
1649 			if (drvp->UDMA_mode > 4) {
1650 				ideconf |= PIIX_CONFIG_UDMA100(channel, drive);
1651 			} else {
1652 				ideconf &= ~PIIX_CONFIG_UDMA100(channel, drive);
1653 				if (drvp->UDMA_mode > 2) {
1654 					ideconf |= PIIX_CONFIG_UDMA66(channel,
1655 					    drive);
1656 				} else {
1657 					ideconf &= ~PIIX_CONFIG_UDMA66(channel,
1658 					    drive);
1659 				}
1660 			}
1661 		}
1662 		if (sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801AA_IDE) {
1663 			/* setup Ultra/66 */
1664 			if (drvp->UDMA_mode > 2 &&
1665 			    (ideconf & PIIX_CONFIG_CR(channel, drive)) == 0)
1666 				drvp->UDMA_mode = 2;
1667 			if (drvp->UDMA_mode > 2)
1668 				ideconf |= PIIX_CONFIG_UDMA66(channel, drive);
1669 			else
1670 				ideconf &= ~PIIX_CONFIG_UDMA66(channel, drive);
1671 		}
1672 		if ((chp->wdc->cap & WDC_CAPABILITY_UDMA) &&
1673 		    (drvp->drive_flags & DRIVE_UDMA)) {
1674 			/* use Ultra/DMA */
1675 			drvp->drive_flags &= ~DRIVE_DMA;
1676 			udmareg |= PIIX_UDMACTL_DRV_EN(channel, drive);
1677 			udmareg |= PIIX_UDMATIM_SET(
1678 			    piix4_sct_udma[drvp->UDMA_mode], channel, drive);
1679 		} else {
1680 			/* use Multiword DMA */
1681 			drvp->drive_flags &= ~DRIVE_UDMA;
1682 			if (drive == 0) {
1683 				idetim |= piix_setup_idetim_timings(
1684 				    drvp->DMA_mode, 1, channel);
1685 			} else {
1686 				sidetim |= piix_setup_sidetim_timings(
1687 					drvp->DMA_mode, 1, channel);
1688 				idetim = PIIX_IDETIM_SET(idetim,
1689 				    PIIX_IDETIM_SITRE, channel);
1690 			}
1691 		}
1692 		idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
1693 
1694 pio:		/* use PIO mode */
1695 		idetim |= piix_setup_idetim_drvs(drvp);
1696 		if (drive == 0) {
1697 			idetim |= piix_setup_idetim_timings(
1698 			    drvp->PIO_mode, 0, channel);
1699 		} else {
1700 			sidetim |= piix_setup_sidetim_timings(
1701 				drvp->PIO_mode, 0, channel);
1702 			idetim = PIIX_IDETIM_SET(idetim,
1703 			    PIIX_IDETIM_SITRE, channel);
1704 		}
1705 	}
1706 	if (idedma_ctl != 0) {
1707 		/* Add software bits in status register */
1708 		bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
1709 		    IDEDMA_CTL + (IDEDMA_SCH_OFFSET * channel),
1710 		    idedma_ctl);
1711 	}
1712 	pci_conf_write(sc->sc_pc, sc->sc_tag, PIIX_IDETIM, idetim);
1713 	pci_conf_write(sc->sc_pc, sc->sc_tag, PIIX_SIDETIM, sidetim);
1714 	pci_conf_write(sc->sc_pc, sc->sc_tag, PIIX_UDMAREG, udmareg);
1715 	pci_conf_write(sc->sc_pc, sc->sc_tag, PIIX_CONFIG, ideconf);
1716 	pciide_print_modes(cp);
1717 }
1718 
1719 
1720 /* setup ISP and RTC fields, based on mode */
1721 static u_int32_t
1722 piix_setup_idetim_timings(mode, dma, channel)
1723 	u_int8_t mode;
1724 	u_int8_t dma;
1725 	u_int8_t channel;
1726 {
1727 
1728 	if (dma)
1729 		return PIIX_IDETIM_SET(0,
1730 		    PIIX_IDETIM_ISP_SET(piix_isp_dma[mode]) |
1731 		    PIIX_IDETIM_RTC_SET(piix_rtc_dma[mode]),
1732 		    channel);
1733 	else
1734 		return PIIX_IDETIM_SET(0,
1735 		    PIIX_IDETIM_ISP_SET(piix_isp_pio[mode]) |
1736 		    PIIX_IDETIM_RTC_SET(piix_rtc_pio[mode]),
1737 		    channel);
1738 }
1739 
1740 /* setup DTE, PPE, IE and TIME field based on PIO mode */
1741 static u_int32_t
1742 piix_setup_idetim_drvs(drvp)
1743 	struct ata_drive_datas *drvp;
1744 {
1745 	u_int32_t ret = 0;
1746 	struct channel_softc *chp = drvp->chnl_softc;
1747 	u_int8_t channel = chp->channel;
1748 	u_int8_t drive = drvp->drive;
1749 
1750 	/*
1751 	 * If the drive is using UDMA, the timing setup is independent,
1752 	 * so just check DMA and PIO here.
1753 	 */
1754 	if (drvp->drive_flags & DRIVE_DMA) {
1755 		/* if mode = DMA mode 0, use compatible timings */
1756 		if ((drvp->drive_flags & DRIVE_DMA) &&
1757 		    drvp->DMA_mode == 0) {
1758 			drvp->PIO_mode = 0;
1759 			return ret;
1760 		}
1761 		ret = PIIX_IDETIM_SET(ret, PIIX_IDETIM_TIME(drive), channel);
1762 		/*
1763 		 * If PIO and DMA timings are the same, use fast timings for
1764 		 * PIO too; otherwise fall back to compat timings for PIO.
1765 		 */
1766 		if ((piix_isp_pio[drvp->PIO_mode] !=
1767 		    piix_isp_dma[drvp->DMA_mode]) ||
1768 		    (piix_rtc_pio[drvp->PIO_mode] !=
1769 		    piix_rtc_dma[drvp->DMA_mode]))
1770 			drvp->PIO_mode = 0;
1771 		/* if PIO mode <= 2, use compat timings for PIO */
1772 		if (drvp->PIO_mode <= 2) {
1773 			ret = PIIX_IDETIM_SET(ret, PIIX_IDETIM_DTE(drive),
1774 			    channel);
1775 			return ret;
1776 		}
1777 	}
1778 
1779 	/*
1780 	 * Now setup PIO modes. If mode < 2, use compat timings.
1781 	 * Else enable fast timings. Enable IORDY and prefetch/post
1782 	 * if PIO mode >= 3.
1783 	 */
1784 
1785 	if (drvp->PIO_mode < 2)
1786 		return ret;
1787 
1788 	ret = PIIX_IDETIM_SET(ret, PIIX_IDETIM_TIME(drive), channel);
1789 	if (drvp->PIO_mode >= 3) {
1790 		ret = PIIX_IDETIM_SET(ret, PIIX_IDETIM_IE(drive), channel);
1791 		ret = PIIX_IDETIM_SET(ret, PIIX_IDETIM_PPE(drive), channel);
1792 	}
1793 	return ret;
1794 }
1795 
1796 /* setup values in SIDETIM registers, based on mode */
1797 static u_int32_t
1798 piix_setup_sidetim_timings(mode, dma, channel)
1799 	u_int8_t mode;
1800 	u_int8_t dma;
1801 	u_int8_t channel;
1802 {
1803 	if (dma)
1804 		return PIIX_SIDETIM_ISP_SET(piix_isp_dma[mode], channel) |
1805 		    PIIX_SIDETIM_RTC_SET(piix_rtc_dma[mode], channel);
1806 	else
1807 		return PIIX_SIDETIM_ISP_SET(piix_isp_pio[mode], channel) |
1808 		    PIIX_SIDETIM_RTC_SET(piix_rtc_pio[mode], channel);
1809 }
1810 
1811 void
1812 amd756_chip_map(sc, pa)
1813 	struct pciide_softc *sc;
1814 	struct pci_attach_args *pa;
1815 {
1816 	struct pciide_channel *cp;
1817 	pcireg_t interface = PCI_INTERFACE(pa->pa_class);
1818 	int channel;
1819 	pcireg_t chanenable;
1820 	bus_size_t cmdsize, ctlsize;
1821 
1822 	if (pciide_chipen(sc, pa) == 0)
1823 		return;
1824 	printf("%s: bus-master DMA support present",
1825 	    sc->sc_wdcdev.sc_dev.dv_xname);
1826 	pciide_mapreg_dma(sc, pa);
1827 	printf("\n");
1828 	sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
1829 	    WDC_CAPABILITY_MODE;
1830 	if (sc->sc_dma_ok) {
1831 		sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_UDMA;
1832 		sc->sc_wdcdev.cap |= WDC_CAPABILITY_IRQACK;
1833 		sc->sc_wdcdev.irqack = pciide_irqack;
1834 	}
1835 	sc->sc_wdcdev.PIO_cap = 4;
1836 	sc->sc_wdcdev.DMA_cap = 2;
1837 	sc->sc_wdcdev.UDMA_cap = 4;
1838 	sc->sc_wdcdev.set_modes = amd756_setup_channel;
1839 	sc->sc_wdcdev.channels = sc->wdc_chanarray;
1840 	sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
1841 	chanenable = pci_conf_read(sc->sc_pc, sc->sc_tag, AMD756_CHANSTATUS_EN);
1842 
1843 	WDCDEBUG_PRINT(("amd756_chip_map: Channel enable=0x%x\n", chanenable),
1844 	    DEBUG_PROBE);
1845 	for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
1846 		cp = &sc->pciide_channels[channel];
1847 		if (pciide_chansetup(sc, channel, interface) == 0)
1848 			continue;
1849 
1850 		if ((chanenable & AMD756_CHAN_EN(channel)) == 0) {
1851 			printf("%s: %s channel ignored (disabled)\n",
1852 			    sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
1853 			continue;
1854 		}
1855 		pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize,
1856 		    pciide_pci_intr);
1857 
1858 		if (pciide_chan_candisable(cp))
1859 			chanenable &= ~AMD756_CHAN_EN(channel);
1860 		pciide_map_compat_intr(pa, cp, channel, interface);
1861 		if (cp->hw_ok == 0)
1862 			continue;
1863 
1864 		amd756_setup_channel(&cp->wdc_channel);
1865 	}
1866 	pci_conf_write(sc->sc_pc, sc->sc_tag, AMD756_CHANSTATUS_EN,
1867 	    chanenable);
1868 	return;
1869 }
1870 
1871 void
1872 amd756_setup_channel(chp)
1873 	struct channel_softc *chp;
1874 {
1875 	u_int32_t udmatim_reg, datatim_reg;
1876 	u_int8_t idedma_ctl;
1877 	int mode, drive;
1878 	struct ata_drive_datas *drvp;
1879 	struct pciide_channel *cp = (struct pciide_channel*)chp;
1880 	struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
1881 #ifndef PCIIDE_AMD756_ENABLEDMA
1882 	int rev = PCI_REVISION(
1883 	    pci_conf_read(sc->sc_pc, sc->sc_tag, PCI_CLASS_REG));
1884 #endif
1885 
1886 	idedma_ctl = 0;
1887 	datatim_reg = pci_conf_read(sc->sc_pc, sc->sc_tag, AMD756_DATATIM);
1888 	udmatim_reg = pci_conf_read(sc->sc_pc, sc->sc_tag, AMD756_UDMA);
1889 	datatim_reg &= ~AMD756_DATATIM_MASK(chp->channel);
1890 	udmatim_reg &= ~AMD756_UDMA_MASK(chp->channel);
1891 
1892 	/* setup DMA if needed */
1893 	pciide_channel_dma_setup(cp);
1894 
1895 	for (drive = 0; drive < 2; drive++) {
1896 		drvp = &chp->ch_drive[drive];
1897 		/* If no drive, skip */
1898 		if ((drvp->drive_flags & DRIVE) == 0)
1899 			continue;
1900 		/* add timing values, setup DMA if needed */
1901 		if (((drvp->drive_flags & DRIVE_DMA) == 0 &&
1902 		    (drvp->drive_flags & DRIVE_UDMA) == 0)) {
1903 			mode = drvp->PIO_mode;
1904 			goto pio;
1905 		}
1906 		if ((chp->wdc->cap & WDC_CAPABILITY_UDMA) &&
1907 		    (drvp->drive_flags & DRIVE_UDMA)) {
1908 			/* use Ultra/DMA */
1909 			drvp->drive_flags &= ~DRIVE_DMA;
1910 			udmatim_reg |= AMD756_UDMA_EN(chp->channel, drive) |
1911 			    AMD756_UDMA_EN_MTH(chp->channel, drive) |
1912 			    AMD756_UDMA_TIME(chp->channel, drive,
1913 				amd756_udma_tim[drvp->UDMA_mode]);
1914 			/* can use PIO timings, MW DMA unused */
1915 			mode = drvp->PIO_mode;
1916 		} else {
1917 			/* use Multiword DMA, but only if revision is OK */
1918 			drvp->drive_flags &= ~DRIVE_UDMA;
1919 #ifndef PCIIDE_AMD756_ENABLEDMA
1920 			/*
1921 			 * The DMA bug causes a hard hang if triggered, but
1922 			 * doesn't seem to affect all drives, so the
1923 			 * workaround can be disabled by defining
1924 			 * PCIIDE_AMD756_ENABLEDMA.
1925 			 */
1926 			if (AMD756_CHIPREV_DISABLEDMA(rev)) {
1927 				printf("%s:%d:%d: multi-word DMA disabled due "
1928 				    "to chip revision\n",
1929 				    sc->sc_wdcdev.sc_dev.dv_xname,
1930 				    chp->channel, drive);
1931 				mode = drvp->PIO_mode;
1932 				drvp->drive_flags &= ~DRIVE_DMA;
1933 				goto pio;
1934 			}
1935 #endif
1936 			/* mode = min(pio, dma+2) */
1937 			if (drvp->PIO_mode <= (drvp->DMA_mode + 2))
1938 				mode = drvp->PIO_mode;
1939 			else
1940 				mode = drvp->DMA_mode + 2;
1941 		}
1942 		idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
1943 
1944 pio:		/* setup PIO mode */
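		/* PIO modes 0-2 all use the slowest (mode 0) timing entry */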
1945 		if (mode <= 2) {
1946 			drvp->DMA_mode = 0;
1947 			drvp->PIO_mode = 0;
1948 			mode = 0;
1949 		} else {
1950 			drvp->PIO_mode = mode;
1951 			drvp->DMA_mode = mode - 2;
1952 		}
1953 		datatim_reg |=
1954 		    AMD756_DATATIM_PULSE(chp->channel, drive,
1955 			amd756_pio_set[mode]) |
1956 		    AMD756_DATATIM_RECOV(chp->channel, drive,
1957 			amd756_pio_rec[mode]);
1958 	}
1959 	if (idedma_ctl != 0) {
1960 		/* Add software bits in status register */
1961 		bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
1962 		    IDEDMA_CTL + (IDEDMA_SCH_OFFSET * chp->channel),
1963 		    idedma_ctl);
1964 	}
1965 	pciide_print_modes(cp);
1966 	pci_conf_write(sc->sc_pc, sc->sc_tag, AMD756_DATATIM, datatim_reg);
1967 	pci_conf_write(sc->sc_pc, sc->sc_tag, AMD756_UDMA, udmatim_reg);
1968 }
1969 
1970 void
1971 apollo_chip_map(sc, pa)
1972 	struct pciide_softc *sc;
1973 	struct pci_attach_args *pa;
1974 {
1975 	struct pciide_channel *cp;
1976 	pcireg_t interface = PCI_INTERFACE(pa->pa_class);
1977 	int rev = PCI_REVISION(pa->pa_class);
1978 	int channel;
1979 	u_int32_t ideconf, udma_conf, old_udma_conf;
1980 	bus_size_t cmdsize, ctlsize;
1981 
1982 	if (pciide_chipen(sc, pa) == 0)
1983 		return;
1984 	printf("%s: bus-master DMA support present",
1985 	    sc->sc_wdcdev.sc_dev.dv_xname);
1986 	pciide_mapreg_dma(sc, pa);
1987 	printf("\n");
1988 	sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
1989 	    WDC_CAPABILITY_MODE;
1990 	if (sc->sc_dma_ok) {
1991 		sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_IRQACK;
1992 		sc->sc_wdcdev.irqack = pciide_irqack;
1993 		if (sc->sc_pp->ide_product == PCI_PRODUCT_VIATECH_VT82C586A_IDE
1994 		    && rev >= 6)
1995 			sc->sc_wdcdev.cap |= WDC_CAPABILITY_UDMA;
1996 	}
1997 	sc->sc_wdcdev.PIO_cap = 4;
1998 	sc->sc_wdcdev.DMA_cap = 2;
1999 	sc->sc_wdcdev.UDMA_cap = 2;
2000 	sc->sc_wdcdev.set_modes = apollo_setup_channel;
2001 	sc->sc_wdcdev.channels = sc->wdc_chanarray;
2002 	sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
2003 
2004 	old_udma_conf = pci_conf_read(sc->sc_pc, sc->sc_tag, APO_UDMA);
2005 	WDCDEBUG_PRINT(("apollo_chip_map: old APO_IDECONF=0x%x, "
2006 	    "APO_CTLMISC=0x%x, APO_DATATIM=0x%x, APO_UDMA=0x%x\n",
2007 	    pci_conf_read(sc->sc_pc, sc->sc_tag, APO_IDECONF),
2008 	    pci_conf_read(sc->sc_pc, sc->sc_tag, APO_CTLMISC),
2009 	    pci_conf_read(sc->sc_pc, sc->sc_tag, APO_DATATIM),
2010 	    old_udma_conf),
2011 	    DEBUG_PROBE);
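	/* probe UDMA capability: set the control bits and see which ones stick */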
2012 	pci_conf_write(sc->sc_pc, sc->sc_tag, APO_UDMA,
2013 	    old_udma_conf | (APO_UDMA_PIO_MODE(0, 0) | APO_UDMA_EN(0, 0) |
2014 	    APO_UDMA_EN_MTH(0, 0) |
2015 	    APO_UDMA_CLK66(0)));
2016 	udma_conf = pci_conf_read(sc->sc_pc, sc->sc_tag, APO_UDMA);
2017 	WDCDEBUG_PRINT(("apollo_chip_map: APO_UDMA now 0x%x\n", udma_conf),
2018 	    DEBUG_PROBE);
2019 	if ((udma_conf & (APO_UDMA_PIO_MODE(0, 0) | APO_UDMA_EN(0, 0) |
2020 	    APO_UDMA_EN_MTH(0, 0))) ==
2021 	    (APO_UDMA_PIO_MODE(0, 0) | APO_UDMA_EN(0, 0) |
2022 	    APO_UDMA_EN_MTH(0, 0))) {
2023 		if ((udma_conf & APO_UDMA_CLK66(0)) ==
2024 		    APO_UDMA_CLK66(0)) {
2025 			printf("%s: Ultra/66 capable\n",
2026 			    sc->sc_wdcdev.sc_dev.dv_xname);
2027 			sc->sc_wdcdev.UDMA_cap = 4;
2028 		} else {
2029 			printf("%s: Ultra/33 capable\n",
2030 			    sc->sc_wdcdev.sc_dev.dv_xname);
2031 			sc->sc_wdcdev.UDMA_cap = 2;
2032 		}
2033 	} else {
2034 		sc->sc_wdcdev.cap &= ~WDC_CAPABILITY_UDMA;
2035 	}
2036 	pci_conf_write(sc->sc_pc, sc->sc_tag, APO_UDMA, old_udma_conf);
2037 
2038 	for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
2039 		cp = &sc->pciide_channels[channel];
2040 		if (pciide_chansetup(sc, channel, interface) == 0)
2041 			continue;
2042 
2043 		ideconf = pci_conf_read(sc->sc_pc, sc->sc_tag, APO_IDECONF);
2044 		if ((ideconf & APO_IDECONF_EN(channel)) == 0) {
2045 			printf("%s: %s channel ignored (disabled)\n",
2046 			    sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
2047 			continue;
2048 		}
2049 		pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize,
2050 		    pciide_pci_intr);
2051 		if (cp->hw_ok == 0)
2052 			continue;
2053 		if (pciide_chan_candisable(cp)) {
2054 			ideconf &= ~APO_IDECONF_EN(channel);
2055 			pci_conf_write(sc->sc_pc, sc->sc_tag, APO_IDECONF,
2056 			    ideconf);
2057 		}
2058 		pciide_map_compat_intr(pa, cp, channel, interface);
2059 
2060 		if (cp->hw_ok == 0)
2061 			continue;
2062 		apollo_setup_channel(&sc->pciide_channels[channel].wdc_channel);
2063 	}
2064 	WDCDEBUG_PRINT(("apollo_chip_map: APO_DATATIM=0x%x, APO_UDMA=0x%x\n",
2065 	    pci_conf_read(sc->sc_pc, sc->sc_tag, APO_DATATIM),
2066 	    pci_conf_read(sc->sc_pc, sc->sc_tag, APO_UDMA)), DEBUG_PROBE);
2067 }
2068 
2069 void
2070 apollo_setup_channel(chp)
2071 	struct channel_softc *chp;
2072 {
2073 	u_int32_t udmatim_reg, datatim_reg;
2074 	u_int8_t idedma_ctl;
2075 	int mode, drive;
2076 	struct ata_drive_datas *drvp;
2077 	struct pciide_channel *cp = (struct pciide_channel*)chp;
2078 	struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
2079 
2080 	idedma_ctl = 0;
2081 	datatim_reg = pci_conf_read(sc->sc_pc, sc->sc_tag, APO_DATATIM);
2082 	udmatim_reg = pci_conf_read(sc->sc_pc, sc->sc_tag, APO_UDMA);
2083 	datatim_reg &= ~APO_DATATIM_MASK(chp->channel);
2084 	udmatim_reg &= ~APO_UDMA_MASK(chp->channel);
2085 
2086 	/* setup DMA if needed */
2087 	pciide_channel_dma_setup(cp);
2088 
2089 	/*
2090 	 * We can't mix Ultra/33 and Ultra/66 on the same channel, so
2091 	 * downgrade to Ultra/33 if needed
2092 	 */
2093 	if ((chp->ch_drive[0].drive_flags & DRIVE_UDMA) &&
2094 	    (chp->ch_drive[1].drive_flags & DRIVE_UDMA)) {
2095 		/* both drives UDMA */
2096 		if (chp->ch_drive[0].UDMA_mode > 2 &&
2097 		    chp->ch_drive[1].UDMA_mode <= 2) {
2098 			/* drive 0 Ultra/66, drive 1 Ultra/33 */
2099 			chp->ch_drive[0].UDMA_mode = 2;
2100 		} else if (chp->ch_drive[1].UDMA_mode > 2 &&
2101 		    chp->ch_drive[0].UDMA_mode <= 2) {
2102 			/* drive 1 Ultra/66, drive 0 Ultra/33 */
2103 			chp->ch_drive[1].UDMA_mode = 2;
2104 		}
2105 	}
2106 
2107 	for (drive = 0; drive < 2; drive++) {
2108 		drvp = &chp->ch_drive[drive];
2109 		/* If no drive, skip */
2110 		if ((drvp->drive_flags & DRIVE) == 0)
2111 			continue;
2112 		/* add timing values, setup DMA if needed */
2113 		if (((drvp->drive_flags & DRIVE_DMA) == 0 &&
2114 		    (drvp->drive_flags & DRIVE_UDMA) == 0)) {
2115 			mode = drvp->PIO_mode;
2116 			goto pio;
2117 		}
2118 		if ((chp->wdc->cap & WDC_CAPABILITY_UDMA) &&
2119 		    (drvp->drive_flags & DRIVE_UDMA)) {
2120 			/* use Ultra/DMA */
2121 			drvp->drive_flags &= ~DRIVE_DMA;
2122 			udmatim_reg |= APO_UDMA_EN(chp->channel, drive) |
2123 			    APO_UDMA_EN_MTH(chp->channel, drive) |
2124 			    APO_UDMA_TIME(chp->channel, drive,
2125 				apollo_udma_tim[drvp->UDMA_mode]);
2126 			if (drvp->UDMA_mode > 2)
2127 				udmatim_reg |=
2128 				    APO_UDMA_CLK66(chp->channel);
2129 			/* can use PIO timings, MW DMA unused */
2130 			mode = drvp->PIO_mode;
2131 		} else {
2132 			/* use Multiword DMA */
2133 			drvp->drive_flags &= ~DRIVE_UDMA;
2134 			/* mode = min(pio, dma+2) */
2135 			if (drvp->PIO_mode <= (drvp->DMA_mode + 2))
2136 				mode = drvp->PIO_mode;
2137 			else
2138 				mode = drvp->DMA_mode + 2;
2139 		}
2140 		idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
2141 
2142 pio:		/* setup PIO mode */
2143 		if (mode <= 2) {
2144 			drvp->DMA_mode = 0;
2145 			drvp->PIO_mode = 0;
2146 			mode = 0;
2147 		} else {
2148 			drvp->PIO_mode = mode;
2149 			drvp->DMA_mode = mode - 2;
2150 		}
2151 		datatim_reg |=
2152 		    APO_DATATIM_PULSE(chp->channel, drive,
2153 			apollo_pio_set[mode]) |
2154 		    APO_DATATIM_RECOV(chp->channel, drive,
2155 			apollo_pio_rec[mode]);
2156 	}
2157 	if (idedma_ctl != 0) {
2158 		/* Add software bits in status register */
2159 		bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
2160 		    IDEDMA_CTL + (IDEDMA_SCH_OFFSET * chp->channel),
2161 		    idedma_ctl);
2162 	}
2163 	pciide_print_modes(cp);
2164 	pci_conf_write(sc->sc_pc, sc->sc_tag, APO_DATATIM, datatim_reg);
2165 	pci_conf_write(sc->sc_pc, sc->sc_tag, APO_UDMA, udmatim_reg);
2166 }
2167 
2168 void
2169 cmd_channel_map(pa, sc, channel)
2170 	struct pci_attach_args *pa;
2171 	struct pciide_softc *sc;
2172 	int channel;
2173 {
2174 	struct pciide_channel *cp = &sc->pciide_channels[channel];
2175 	bus_size_t cmdsize, ctlsize;
2176 	u_int8_t ctrl = pciide_pci_read(sc->sc_pc, sc->sc_tag, CMD_CTRL);
2177 	int interface;
2178 
2179 	/*
2180 	 * The 0648/0649 can be told to identify as a RAID controller.
2181 	 * In this case, we have to fake the interface.
2182 	 */
2183 	if (PCI_SUBCLASS(pa->pa_class) != PCI_SUBCLASS_MASS_STORAGE_IDE) {
2184 		interface = PCIIDE_INTERFACE_SETTABLE(0) |
2185 		    PCIIDE_INTERFACE_SETTABLE(1);
2186 		if (pciide_pci_read(pa->pa_pc, pa->pa_tag, CMD_CONF) &
2187 		    CMD_CONF_DSA1)
2188 			interface |= PCIIDE_INTERFACE_PCI(0) |
2189 			    PCIIDE_INTERFACE_PCI(1);
2190 	} else {
2191 		interface = PCI_INTERFACE(pa->pa_class);
2192 	}
2193 
2194 	sc->wdc_chanarray[channel] = &cp->wdc_channel;
2195 	cp->name = PCIIDE_CHANNEL_NAME(channel);
2196 	cp->wdc_channel.channel = channel;
2197 	cp->wdc_channel.wdc = &sc->sc_wdcdev;
2198 
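	/* both channels share one queue, so commands are serialized chip-wide */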
2199 	if (channel > 0) {
2200 		cp->wdc_channel.ch_queue =
2201 		    sc->pciide_channels[0].wdc_channel.ch_queue;
2202 	} else {
2203 		cp->wdc_channel.ch_queue =
2204 		    malloc(sizeof(struct channel_queue), M_DEVBUF, M_NOWAIT);
2205 	}
2206 	if (cp->wdc_channel.ch_queue == NULL) {
2207 		printf("%s %s channel: "
2208 		    "can't allocate memory for command queue\n",
2209 		    sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
2210 		return;
2211 	}
2212 
2213 	printf("%s: %s channel %s to %s mode\n",
2214 	    sc->sc_wdcdev.sc_dev.dv_xname, cp->name,
2215 	    (interface & PCIIDE_INTERFACE_SETTABLE(channel)) ?
2216 	    "configured" : "wired",
2217 	    (interface & PCIIDE_INTERFACE_PCI(channel)) ?
2218 	    "native-PCI" : "compatibility");
2219 
2220 	/*
2221 	 * With a CMD PCI064x, if we get here, the first channel is enabled:
2222 	 * there's no way to disable the first channel without disabling
2223 	 * the whole device
2224 	 */
2225 	if (channel != 0 && (ctrl & CMD_CTRL_2PORT) == 0) {
2226 		printf("%s: %s channel ignored (disabled)\n",
2227 		    sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
2228 		return;
2229 	}
2230 
2231 	pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize, cmd_pci_intr);
2232 	if (cp->hw_ok == 0)
2233 		return;
2234 	if (channel == 1) {
2235 		if (pciide_chan_candisable(cp)) {
2236 			ctrl &= ~CMD_CTRL_2PORT;
2237 			pciide_pci_write(pa->pa_pc, pa->pa_tag,
2238 			    CMD_CTRL, ctrl);
2239 		}
2240 	}
2241 	pciide_map_compat_intr(pa, cp, channel, interface);
2242 }
2243 
2244 int
2245 cmd_pci_intr(arg)
2246 	void *arg;
2247 {
2248 	struct pciide_softc *sc = arg;
2249 	struct pciide_channel *cp;
2250 	struct channel_softc *wdc_cp;
2251 	int i, rv, crv;
2252 	u_int32_t priirq, secirq;
2253 
2254 	rv = 0;
2255 	priirq = pciide_pci_read(sc->sc_pc, sc->sc_tag, CMD_CONF);
2256 	secirq = pciide_pci_read(sc->sc_pc, sc->sc_tag, CMD_ARTTIM23);
2257 	for (i = 0; i < sc->sc_wdcdev.nchannels; i++) {
2258 		cp = &sc->pciide_channels[i];
2259 		wdc_cp = &cp->wdc_channel;
2260 		/* If a compat channel skip. */
2261 		if (cp->compat)
2262 			continue;
2263 		if ((i == 0 && (priirq & CMD_CONF_DRV0_INTR)) ||
2264 		    (i == 1 && (secirq & CMD_ARTTIM23_IRQ))) {
2265 			crv = wdcintr(wdc_cp);
2266 			if (crv == 0)
2267 				printf("%s:%d: bogus intr\n",
2268 				    sc->sc_wdcdev.sc_dev.dv_xname, i);
2269 			else
2270 				rv = 1;
2271 		}
2272 	}
2273 	return rv;
2274 }
2275 
2276 void
2277 cmd_chip_map(sc, pa)
2278 	struct pciide_softc *sc;
2279 	struct pci_attach_args *pa;
2280 {
2281 	int channel;
2282 
2283 	/*
2284 	 * For a CMD PCI064x, the use of PCI_COMMAND_IO_ENABLE
2285 	 * and the base address registers can be disabled at the
2286 	 * hardware level. In this case, the device is wired
2287 	 * in compat mode and its first channel is always enabled,
2288 	 * but we can't rely on PCI_COMMAND_IO_ENABLE.
2289 	 * In fact, it seems that the first channel of the CMD PCI0640
2290 	 * can't be disabled.
2291 	 */
2292 
2293 #ifdef PCIIDE_CMD064x_DISABLE
2294 	if (pciide_chipen(sc, pa) == 0)
2295 		return;
2296 #endif
2297 
2298 	printf("%s: hardware does not support DMA\n",
2299 	    sc->sc_wdcdev.sc_dev.dv_xname);
2300 	sc->sc_dma_ok = 0;
2301 
2302 	sc->sc_wdcdev.channels = sc->wdc_chanarray;
2303 	sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
2304 	sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16;
2305 
2306 	for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
2307 		cmd_channel_map(pa, sc, channel);
2308 	}
2309 }
2310 
2311 void
2312 cmd0643_9_chip_map(sc, pa)
2313 	struct pciide_softc *sc;
2314 	struct pci_attach_args *pa;
2315 {
2316 	struct pciide_channel *cp;
2317 	int channel;
2318 	int rev = PCI_REVISION(
2319 	    pci_conf_read(sc->sc_pc, sc->sc_tag, PCI_CLASS_REG));
2320 
2321 	/*
2322 	 * For a CMD PCI064x, the use of PCI_COMMAND_IO_ENABLE
2323 	 * and the base address registers can be disabled at the
2324 	 * hardware level. In this case, the device is wired
2325 	 * in compat mode and its first channel is always enabled,
2326 	 * but we can't rely on PCI_COMMAND_IO_ENABLE.
2327 	 * In fact, it seems that the first channel of the CMD PCI0640
2328 	 * can't be disabled.
2329 	 */
2330 
2331 #ifdef PCIIDE_CMD064x_DISABLE
2332 	if (pciide_chipen(sc, pa) == 0)
2333 		return;
2334 #endif
2335 	printf("%s: bus-master DMA support present",
2336 	    sc->sc_wdcdev.sc_dev.dv_xname);
2337 	pciide_mapreg_dma(sc, pa);
2338 	printf("\n");
2339 	sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
2340 	    WDC_CAPABILITY_MODE;
2341 	if (sc->sc_dma_ok) {
2342 		sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_IRQACK;
2343 		switch (sc->sc_pp->ide_product) {
2344 		case PCI_PRODUCT_CMDTECH_649:
2345 		case PCI_PRODUCT_CMDTECH_648:
2346 			sc->sc_wdcdev.cap |= WDC_CAPABILITY_UDMA;
2347 			sc->sc_wdcdev.UDMA_cap = 4;
2348 			sc->sc_wdcdev.irqack = cmd646_9_irqack;
2349 			break;
2350 		case PCI_PRODUCT_CMDTECH_646:
2351 			if (rev >= CMD0646U2_REV) {
2352 				sc->sc_wdcdev.cap |= WDC_CAPABILITY_UDMA;
2353 				sc->sc_wdcdev.UDMA_cap = 2;
2354 			} else if (rev >= CMD0646U_REV) {
2355 			/*
2356 			 * Linux's driver claims that the 646U is broken
2357 			 * with UDMA. Only enable it if we know what we're
2358 			 * doing
2359 			 */
2360 #ifdef PCIIDE_CMD0646U_ENABLEUDMA
2361 				sc->sc_wdcdev.cap |= WDC_CAPABILITY_UDMA;
2362 				sc->sc_wdcdev.UDMA_cap = 2;
2363 #endif
2364 				/* explicitly disable UDMA */
2365 				pciide_pci_write(sc->sc_pc, sc->sc_tag,
2366 				    CMD_UDMATIM(0), 0);
2367 				pciide_pci_write(sc->sc_pc, sc->sc_tag,
2368 				    CMD_UDMATIM(1), 0);
2369 			}
2370 			sc->sc_wdcdev.irqack = cmd646_9_irqack;
2371 			break;
2372 		default:
2373 			sc->sc_wdcdev.irqack = pciide_irqack;
2374 		}
2375 	}
2376 
2377 	sc->sc_wdcdev.channels = sc->wdc_chanarray;
2378 	sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
2379 	sc->sc_wdcdev.PIO_cap = 4;
2380 	sc->sc_wdcdev.DMA_cap = 2;
2381 	sc->sc_wdcdev.set_modes = cmd0643_9_setup_channel;
2382 
2383 	WDCDEBUG_PRINT(("cmd0643_9_chip_map: old timings reg 0x%x 0x%x\n",
2384 		pci_conf_read(sc->sc_pc, sc->sc_tag, 0x54),
2385 		pci_conf_read(sc->sc_pc, sc->sc_tag, 0x58)),
2386 		DEBUG_PROBE);
2387 
2388 	for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
2389 		cp = &sc->pciide_channels[channel];
2390 		cmd_channel_map(pa, sc, channel);
2391 		if (cp->hw_ok == 0)
2392 			continue;
2393 		cmd0643_9_setup_channel(&cp->wdc_channel);
2394 	}
2395 	/*
2396 	 * note - this also makes sure we clear the irq disable and reset
2397 	 * bits
2398 	 */
2399 	pciide_pci_write(sc->sc_pc, sc->sc_tag, CMD_DMA_MODE, CMD_DMA_MULTIPLE);
2400 	WDCDEBUG_PRINT(("cmd0643_9_chip_map: timings reg now 0x%x 0x%x\n",
2401 	    pci_conf_read(sc->sc_pc, sc->sc_tag, 0x54),
2402 	    pci_conf_read(sc->sc_pc, sc->sc_tag, 0x58)),
2403 	    DEBUG_PROBE);
2404 }
2405 
2406 void
2407 cmd0643_9_setup_channel(chp)
2408 	struct channel_softc *chp;
2409 {
2410 	struct ata_drive_datas *drvp;
2411 	u_int8_t tim;
2412 	u_int32_t idedma_ctl, udma_reg;
2413 	int drive;
2414 	struct pciide_channel *cp = (struct pciide_channel*)chp;
2415 	struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
2416 
2417 	idedma_ctl = 0;
2418 	/* setup DMA if needed */
2419 	pciide_channel_dma_setup(cp);
2420 
2421 	for (drive = 0; drive < 2; drive++) {
2422 		drvp = &chp->ch_drive[drive];
2423 		/* If no drive, skip */
2424 		if ((drvp->drive_flags & DRIVE) == 0)
2425 			continue;
2426 		/* add timing values, setup DMA if needed */
2427 		tim = cmd0643_9_data_tim_pio[drvp->PIO_mode];
2428 		if (drvp->drive_flags & (DRIVE_DMA | DRIVE_UDMA)) {
2429 			if (drvp->drive_flags & DRIVE_UDMA) {
2430 				/* UltraDMA on a 646U2, 0648 or 0649 */
2431 				drvp->drive_flags &= ~DRIVE_DMA;
2432 				udma_reg = pciide_pci_read(sc->sc_pc,
2433 				    sc->sc_tag, CMD_UDMATIM(chp->channel));
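				/* cap at Ultra/33 unless the 80-wire cable bit is set */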
2434 				if (drvp->UDMA_mode > 2 &&
2435 				    (pciide_pci_read(sc->sc_pc, sc->sc_tag,
2436 				    CMD_BICSR) &
2437 				    CMD_BICSR_80(chp->channel)) == 0)
2438 					drvp->UDMA_mode = 2;
2439 				if (drvp->UDMA_mode > 2)
2440 					udma_reg &= ~CMD_UDMATIM_UDMA33(drive);
2441 				else if (sc->sc_wdcdev.UDMA_cap > 2)
2442 					udma_reg |= CMD_UDMATIM_UDMA33(drive);
2443 				udma_reg |= CMD_UDMATIM_UDMA(drive);
2444 				udma_reg &= ~(CMD_UDMATIM_TIM_MASK <<
2445 				    CMD_UDMATIM_TIM_OFF(drive));
2446 				udma_reg |=
2447 				    (cmd0646_9_tim_udma[drvp->UDMA_mode] <<
2448 				    CMD_UDMATIM_TIM_OFF(drive));
2449 				pciide_pci_write(sc->sc_pc, sc->sc_tag,
2450 				    CMD_UDMATIM(chp->channel), udma_reg);
2451 			} else {
2452 				/*
2453 				 * use Multiword DMA.
2454 				 * Timings will be used for both PIO and DMA,
2455 				 * so adjust DMA mode if needed.
2456 				 * If we have a 0646U2/8/9, turn off UDMA.
2457 				 */
2458 				if (sc->sc_wdcdev.cap & WDC_CAPABILITY_UDMA) {
2459 					udma_reg = pciide_pci_read(sc->sc_pc,
2460 					    sc->sc_tag,
2461 					    CMD_UDMATIM(chp->channel));
2462 					udma_reg &= ~CMD_UDMATIM_UDMA(drive);
2463 					pciide_pci_write(sc->sc_pc, sc->sc_tag,
2464 					    CMD_UDMATIM(chp->channel),
2465 					    udma_reg);
2466 				}
2467 				if (drvp->PIO_mode >= 3 &&
2468 				    (drvp->DMA_mode + 2) > drvp->PIO_mode) {
2469 					drvp->DMA_mode = drvp->PIO_mode - 2;
2470 				}
2471 				tim = cmd0643_9_data_tim_dma[drvp->DMA_mode];
2472 			}
2473 			idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
2474 		}
2475 		pciide_pci_write(sc->sc_pc, sc->sc_tag,
2476 		    CMD_DATA_TIM(chp->channel, drive), tim);
2477 	}
2478 	if (idedma_ctl != 0) {
2479 		/* Add software bits in status register */
2480 		bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
2481 		    IDEDMA_CTL + (IDEDMA_SCH_OFFSET * chp->channel),
2482 		    idedma_ctl);
2483 	}
2484 	pciide_print_modes(cp);
2485 }
2486 
2487 void
2488 cmd646_9_irqack(chp)
2489 	struct channel_softc *chp;
2490 {
2491 	u_int32_t priirq, secirq;
2492 	struct pciide_channel *cp = (struct pciide_channel*)chp;
2493 	struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
2494 
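	/* writing the latched value back acknowledges this channel's interrupt */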
2495 	if (chp->channel == 0) {
2496 		priirq = pciide_pci_read(sc->sc_pc, sc->sc_tag, CMD_CONF);
2497 		pciide_pci_write(sc->sc_pc, sc->sc_tag, CMD_CONF, priirq);
2498 	} else {
2499 		secirq = pciide_pci_read(sc->sc_pc, sc->sc_tag, CMD_ARTTIM23);
2500 		pciide_pci_write(sc->sc_pc, sc->sc_tag, CMD_ARTTIM23, secirq);
2501 	}
2502 	pciide_irqack(chp);
2503 }
2504 
2505 void
2506 cy693_chip_map(sc, pa)
2507 	struct pciide_softc *sc;
2508 	struct pci_attach_args *pa;
2509 {
2510 	struct pciide_channel *cp;
2511 	pcireg_t interface = PCI_INTERFACE(pa->pa_class);
2512 	bus_size_t cmdsize, ctlsize;
2513 
2514 	if (pciide_chipen(sc, pa) == 0)
2515 		return;
2516 	/*
2517 	 * this chip has 2 PCI IDE functions, one for primary and one for
2518 	 * secondary. So we need to call pciide_mapregs_compat() with
2519 	 * the real channel
2520 	 */
2521 	if (pa->pa_function == 1) {
2522 		sc->sc_cy_compatchan = 0;
2523 	} else if (pa->pa_function == 2) {
2524 		sc->sc_cy_compatchan = 1;
2525 	} else {
2526 		printf("%s: unexpected PCI function %d\n",
2527 		    sc->sc_wdcdev.sc_dev.dv_xname, pa->pa_function);
2528 		return;
2529 	}
2530 	if (interface & PCIIDE_INTERFACE_BUS_MASTER_DMA) {
2531 		printf("%s: bus-master DMA support present",
2532 		    sc->sc_wdcdev.sc_dev.dv_xname);
2533 		pciide_mapreg_dma(sc, pa);
2534 	} else {
2535 		printf("%s: hardware does not support DMA",
2536 		    sc->sc_wdcdev.sc_dev.dv_xname);
2537 		sc->sc_dma_ok = 0;
2538 	}
2539 	printf("\n");
2540 
2541 	sc->sc_cy_handle = cy82c693_init(pa->pa_iot);
2542 	if (sc->sc_cy_handle == NULL) {
2543 		printf("%s: unable to map hyperCache control registers\n",
2544 		    sc->sc_wdcdev.sc_dev.dv_xname);
2545 		sc->sc_dma_ok = 0;
2546 	}
2547 
2548 	sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
2549 	    WDC_CAPABILITY_MODE;
2550 	if (sc->sc_dma_ok) {
2551 		sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_IRQACK;
2552 		sc->sc_wdcdev.irqack = pciide_irqack;
2553 	}
2554 	sc->sc_wdcdev.PIO_cap = 4;
2555 	sc->sc_wdcdev.DMA_cap = 2;
2556 	sc->sc_wdcdev.set_modes = cy693_setup_channel;
2557 
2558 	sc->sc_wdcdev.channels = sc->wdc_chanarray;
2559 	sc->sc_wdcdev.nchannels = 1;
2560 
2561 	/* Only one channel for this chip; if we are here it's enabled */
2562 	cp = &sc->pciide_channels[0];
2563 	sc->wdc_chanarray[0] = &cp->wdc_channel;
2564 	cp->name = PCIIDE_CHANNEL_NAME(0);
2565 	cp->wdc_channel.channel = 0;
2566 	cp->wdc_channel.wdc = &sc->sc_wdcdev;
2567 	cp->wdc_channel.ch_queue =
2568 	    malloc(sizeof(struct channel_queue), M_DEVBUF, M_NOWAIT);
2569 	if (cp->wdc_channel.ch_queue == NULL) {
2570 		printf("%s primary channel: "
2571 		    "can't allocate memory for command queue\n",
2572 		    sc->sc_wdcdev.sc_dev.dv_xname);
2573 		return;
2574 	}
2575 	printf("%s: primary channel %s to ",
2576 	    sc->sc_wdcdev.sc_dev.dv_xname,
2577 	    (interface & PCIIDE_INTERFACE_SETTABLE(0)) ?
2578 	    "configured" : "wired");
2579 	if (interface & PCIIDE_INTERFACE_PCI(0)) {
2580 		printf("native-PCI");
2581 		cp->hw_ok = pciide_mapregs_native(pa, cp, &cmdsize, &ctlsize,
2582 		    pciide_pci_intr);
2583 	} else {
2584 		printf("compatibility");
2585 		cp->hw_ok = pciide_mapregs_compat(pa, cp, sc->sc_cy_compatchan,
2586 		    &cmdsize, &ctlsize);
2587 	}
2588 	printf(" mode\n");
2589 	cp->wdc_channel.data32iot = cp->wdc_channel.cmd_iot;
2590 	cp->wdc_channel.data32ioh = cp->wdc_channel.cmd_ioh;
2591 	wdcattach(&cp->wdc_channel);
2592 	if (pciide_chan_candisable(cp)) {
2593 		pci_conf_write(sc->sc_pc, sc->sc_tag,
2594 		    PCI_COMMAND_STATUS_REG, 0);
2595 	}
2596 	pciide_map_compat_intr(pa, cp, sc->sc_cy_compatchan, interface);
2597 	if (cp->hw_ok == 0)
2598 		return;
2599 	WDCDEBUG_PRINT(("cy693_chip_map: old timings reg 0x%x\n",
2600 	    pci_conf_read(sc->sc_pc, sc->sc_tag, CY_CMD_CTRL)),DEBUG_PROBE);
2601 	cy693_setup_channel(&cp->wdc_channel);
2602 	WDCDEBUG_PRINT(("cy693_chip_map: new timings reg 0x%x\n",
2603 	    pci_conf_read(sc->sc_pc, sc->sc_tag, CY_CMD_CTRL)), DEBUG_PROBE);
2604 }
2605 
2606 void
2607 cy693_setup_channel(chp)
2608 	struct channel_softc *chp;
2609 {
2610 	struct ata_drive_datas *drvp;
2611 	int drive;
2612 	u_int32_t cy_cmd_ctrl;
2613 	u_int32_t idedma_ctl;
2614 	struct pciide_channel *cp = (struct pciide_channel*)chp;
2615 	struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
2616 	int dma_mode = -1;
2617 
2618 	cy_cmd_ctrl = idedma_ctl = 0;
2619 
2620 	/* setup DMA if needed */
2621 	pciide_channel_dma_setup(cp);
2622 
2623 	for (drive = 0; drive < 2; drive++) {
2624 		drvp = &chp->ch_drive[drive];
2625 		/* If no drive, skip */
2626 		if ((drvp->drive_flags & DRIVE) == 0)
2627 			continue;
2628 		/* add timing values, setup DMA if needed */
2629 		if (drvp->drive_flags & DRIVE_DMA) {
2630 			idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
2631 			/* use Multiword DMA */
2632 			if (dma_mode == -1 || dma_mode > drvp->DMA_mode)
2633 				dma_mode = drvp->DMA_mode;
2634 		}
2635 		cy_cmd_ctrl |= (cy_pio_pulse[drvp->PIO_mode] <<
2636 		    CY_CMD_CTRL_IOW_PULSE_OFF(drive));
2637 		cy_cmd_ctrl |= (cy_pio_rec[drvp->PIO_mode] <<
2638 		    CY_CMD_CTRL_IOW_REC_OFF(drive));
2639 		cy_cmd_ctrl |= (cy_pio_pulse[drvp->PIO_mode] <<
2640 		    CY_CMD_CTRL_IOR_PULSE_OFF(drive));
2641 		cy_cmd_ctrl |= (cy_pio_rec[drvp->PIO_mode] <<
2642 		    CY_CMD_CTRL_IOR_REC_OFF(drive));
2643 	}
2644 	pci_conf_write(sc->sc_pc, sc->sc_tag, CY_CMD_CTRL, cy_cmd_ctrl);
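	/* one DMA timing per channel: both drives get the slowest negotiated mode */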
2645 	chp->ch_drive[0].DMA_mode = dma_mode;
2646 	chp->ch_drive[1].DMA_mode = dma_mode;
2647 
2648 	if (dma_mode == -1)
2649 		dma_mode = 0;
2650 
2651 	if (sc->sc_cy_handle != NULL) {
2652 		/* Note: `multiple' is implied. */
2653 		cy82c693_write(sc->sc_cy_handle,
2654 		    (sc->sc_cy_compatchan == 0) ?
2655 		    CY_DMA_IDX_PRIMARY : CY_DMA_IDX_SECONDARY, dma_mode);
2656 	}
2657 
2658 	pciide_print_modes(cp);
2659 
2660 	if (idedma_ctl != 0) {
2661 		/* Add software bits in status register */
2662 		bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
2663 		    IDEDMA_CTL, idedma_ctl);
2664 	}
2665 }
2666 
2667 void
2668 sis_chip_map(sc, pa)
2669 	struct pciide_softc *sc;
2670 	struct pci_attach_args *pa;
2671 {
2672 	struct pciide_channel *cp;
2673 	int channel;
2674 	u_int8_t sis_ctr0 = pciide_pci_read(sc->sc_pc, sc->sc_tag, SIS_CTRL0);
2675 	pcireg_t interface = PCI_INTERFACE(pa->pa_class);
2676 	pcireg_t rev = PCI_REVISION(pa->pa_class);
2677 	bus_size_t cmdsize, ctlsize;
2678 
2679 	if (pciide_chipen(sc, pa) == 0)
2680 		return;
2681 	printf("%s: bus-master DMA support present",
2682 	    sc->sc_wdcdev.sc_dev.dv_xname);
2683 	pciide_mapreg_dma(sc, pa);
2684 	printf("\n");
2685 	sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
2686 	    WDC_CAPABILITY_MODE;
2687 	if (sc->sc_dma_ok) {
2688 		sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_IRQACK;
2689 		sc->sc_wdcdev.irqack = pciide_irqack;
2690 		if (rev > 0xd0)
2691 			sc->sc_wdcdev.cap |= WDC_CAPABILITY_UDMA;
2692 	}
2693 
2694 	sc->sc_wdcdev.PIO_cap = 4;
2695 	sc->sc_wdcdev.DMA_cap = 2;
2696 	if (sc->sc_wdcdev.cap & WDC_CAPABILITY_UDMA)
2697 		sc->sc_wdcdev.UDMA_cap = 2;
2698 	sc->sc_wdcdev.set_modes = sis_setup_channel;
2699 
2700 	sc->sc_wdcdev.channels = sc->wdc_chanarray;
2701 	sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
2702 
2703 	pciide_pci_write(sc->sc_pc, sc->sc_tag, SIS_MISC,
2704 	    pciide_pci_read(sc->sc_pc, sc->sc_tag, SIS_MISC) |
2705 	    SIS_MISC_TIM_SEL | SIS_MISC_FIFO_SIZE);
2706 
2707 	for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
2708 		cp = &sc->pciide_channels[channel];
2709 		if (pciide_chansetup(sc, channel, interface) == 0)
2710 			continue;
2711 		if ((channel == 0 && (sis_ctr0 & SIS_CTRL0_CHAN0_EN) == 0) ||
2712 		    (channel == 1 && (sis_ctr0 & SIS_CTRL0_CHAN1_EN) == 0)) {
2713 			printf("%s: %s channel ignored (disabled)\n",
2714 			    sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
2715 			continue;
2716 		}
2717 		pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize,
2718 		    pciide_pci_intr);
2719 		if (cp->hw_ok == 0)
2720 			continue;
2721 		if (pciide_chan_candisable(cp)) {
2722 			if (channel == 0)
2723 				sis_ctr0 &= ~SIS_CTRL0_CHAN0_EN;
2724 			else
2725 				sis_ctr0 &= ~SIS_CTRL0_CHAN1_EN;
2726 			pciide_pci_write(sc->sc_pc, sc->sc_tag, SIS_CTRL0,
2727 			    sis_ctr0);
2728 		}
2729 		pciide_map_compat_intr(pa, cp, channel, interface);
2730 		if (cp->hw_ok == 0)
2731 			continue;
2732 		sis_setup_channel(&cp->wdc_channel);
2733 	}
2734 }
2735 
2736 void
2737 sis_setup_channel(chp)
2738 	struct channel_softc *chp;
2739 {
2740 	struct ata_drive_datas *drvp;
2741 	int drive;
2742 	u_int32_t sis_tim;
2743 	u_int32_t idedma_ctl;
2744 	struct pciide_channel *cp = (struct pciide_channel*)chp;
2745 	struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
2746 
2747 	WDCDEBUG_PRINT(("sis_setup_channel: old timings reg for "
2748 	    "channel %d 0x%x\n", chp->channel,
2749 	    pci_conf_read(sc->sc_pc, sc->sc_tag, SIS_TIM(chp->channel))),
2750 	    DEBUG_PROBE);
2751 	sis_tim = 0;
2752 	idedma_ctl = 0;
2753 	/* setup DMA if needed */
2754 	pciide_channel_dma_setup(cp);
2755 
2756 	for (drive = 0; drive < 2; drive++) {
2757 		drvp = &chp->ch_drive[drive];
2758 		/* If no drive, skip */
2759 		if ((drvp->drive_flags & DRIVE) == 0)
2760 			continue;
2761 		/* add timing values, setup DMA if needed */
2762 		if ((drvp->drive_flags & DRIVE_DMA) == 0 &&
2763 		    (drvp->drive_flags & DRIVE_UDMA) == 0)
2764 			goto pio;
2765 
2766 		if (drvp->drive_flags & DRIVE_UDMA) {
2767 			/* use Ultra/DMA */
2768 			drvp->drive_flags &= ~DRIVE_DMA;
2769 			sis_tim |= sis_udma_tim[drvp->UDMA_mode] <<
2770 			    SIS_TIM_UDMA_TIME_OFF(drive);
2771 			sis_tim |= SIS_TIM_UDMA_EN(drive);
2772 		} else {
2773 			/*
2774 			 * use Multiword DMA
2775 			 * Timings will be used for both PIO and DMA,
2776 			 * so adjust DMA mode if needed
2777 			 */
2778 			if (drvp->PIO_mode > (drvp->DMA_mode + 2))
2779 				drvp->PIO_mode = drvp->DMA_mode + 2;
2780 			if (drvp->DMA_mode + 2 > (drvp->PIO_mode))
2781 				drvp->DMA_mode = (drvp->PIO_mode > 2) ?
2782 				    drvp->PIO_mode - 2 : 0;
2783 			if (drvp->DMA_mode == 0)
2784 				drvp->PIO_mode = 0;
2785 		}
2786 		idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
2787 pio:		sis_tim |= sis_pio_act[drvp->PIO_mode] <<
2788 		    SIS_TIM_ACT_OFF(drive);
2789 		sis_tim |= sis_pio_rec[drvp->PIO_mode] <<
2790 		    SIS_TIM_REC_OFF(drive);
2791 	}
2792 	WDCDEBUG_PRINT(("sis_setup_channel: new timings reg for "
2793 	    "channel %d 0x%x\n", chp->channel, sis_tim), DEBUG_PROBE);
2794 	pci_conf_write(sc->sc_pc, sc->sc_tag, SIS_TIM(chp->channel), sis_tim);
2795 	if (idedma_ctl != 0) {
2796 		/* Add software bits in status register */
2797 		bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
2798 		    IDEDMA_CTL, idedma_ctl);
2799 	}
2800 	pciide_print_modes(cp);
2801 }
2802 
2803 void
2804 acer_chip_map(sc, pa)
2805 	struct pciide_softc *sc;
2806 	struct pci_attach_args *pa;
2807 {
2808 	struct pciide_channel *cp;
2809 	int channel;
2810 	pcireg_t cr, interface;
2811 	bus_size_t cmdsize, ctlsize;
2812 	pcireg_t rev = PCI_REVISION(pa->pa_class);
2813 
2814 	if (pciide_chipen(sc, pa) == 0)
2815 		return;
2816 	printf("%s: bus-master DMA support present",
2817 	    sc->sc_wdcdev.sc_dev.dv_xname);
2818 	pciide_mapreg_dma(sc, pa);
2819 	printf("\n");
2820 	sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
2821 	    WDC_CAPABILITY_MODE;
2822 	if (sc->sc_dma_ok) {
2823 		sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA;
2824 		if (rev >= 0x20)
2825 			sc->sc_wdcdev.cap |= WDC_CAPABILITY_UDMA;
2826 		sc->sc_wdcdev.cap |= WDC_CAPABILITY_IRQACK;
2827 		sc->sc_wdcdev.irqack = pciide_irqack;
2828 	}
2829 
2830 	sc->sc_wdcdev.PIO_cap = 4;
2831 	sc->sc_wdcdev.DMA_cap = 2;
2832 	sc->sc_wdcdev.UDMA_cap = 2;
2833 	sc->sc_wdcdev.set_modes = acer_setup_channel;
2834 	sc->sc_wdcdev.channels = sc->wdc_chanarray;
2835 	sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
2836 
2837 	pciide_pci_write(sc->sc_pc, sc->sc_tag, ACER_CDRC,
2838 	    (pciide_pci_read(sc->sc_pc, sc->sc_tag, ACER_CDRC) |
2839 		ACER_CDRC_DMA_EN) & ~ACER_CDRC_FIFO_DISABLE);
2840 
2841 	/* Enable "microsoft register bits" R/W. */
2842 	pciide_pci_write(sc->sc_pc, sc->sc_tag, ACER_CCAR3,
2843 	    pciide_pci_read(sc->sc_pc, sc->sc_tag, ACER_CCAR3) | ACER_CCAR3_PI);
2844 	pciide_pci_write(sc->sc_pc, sc->sc_tag, ACER_CCAR1,
2845 	    pciide_pci_read(sc->sc_pc, sc->sc_tag, ACER_CCAR1) &
2846 	    ~(ACER_CHANSTATUS_RO|PCIIDE_CHAN_RO(0)|PCIIDE_CHAN_RO(1)));
2847 	pciide_pci_write(sc->sc_pc, sc->sc_tag, ACER_CCAR2,
2848 	    pciide_pci_read(sc->sc_pc, sc->sc_tag, ACER_CCAR2) &
2849 	    ~ACER_CHANSTATUSREGS_RO);
2850 	cr = pci_conf_read(sc->sc_pc, sc->sc_tag, PCI_CLASS_REG);
2851 	cr |= (PCIIDE_CHANSTATUS_EN << PCI_INTERFACE_SHIFT);
2852 	pci_conf_write(sc->sc_pc, sc->sc_tag, PCI_CLASS_REG, cr);
2853 	/* Don't use cr, re-read the real register content instead */
2854 	interface = PCI_INTERFACE(pci_conf_read(sc->sc_pc, sc->sc_tag,
2855 	    PCI_CLASS_REG));
2856 
2857 	for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
2858 		cp = &sc->pciide_channels[channel];
2859 		if (pciide_chansetup(sc, channel, interface) == 0)
2860 			continue;
2861 		if ((interface & PCIIDE_CHAN_EN(channel)) == 0) {
2862 			printf("%s: %s channel ignored (disabled)\n",
2863 			    sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
2864 			continue;
2865 		}
2866 		pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize,
2867 		    acer_pci_intr);
2868 		if (cp->hw_ok == 0)
2869 			continue;
2870 		if (pciide_chan_candisable(cp)) {
2871 			cr &= ~(PCIIDE_CHAN_EN(channel) << PCI_INTERFACE_SHIFT);
2872 			pci_conf_write(sc->sc_pc, sc->sc_tag,
2873 			    PCI_CLASS_REG, cr);
2874 		}
2875 		pciide_map_compat_intr(pa, cp, channel, interface);
2876 		acer_setup_channel(&cp->wdc_channel);
2877 	}
2878 }
2879 
2880 void
2881 acer_setup_channel(chp)
2882 	struct channel_softc *chp;
2883 {
2884 	struct ata_drive_datas *drvp;
2885 	int drive;
2886 	u_int32_t acer_fifo_udma;
2887 	u_int32_t idedma_ctl;
2888 	struct pciide_channel *cp = (struct pciide_channel*)chp;
2889 	struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
2890 
2891 	idedma_ctl = 0;
2892 	acer_fifo_udma = pci_conf_read(sc->sc_pc, sc->sc_tag, ACER_FTH_UDMA);
2893 	WDCDEBUG_PRINT(("acer_setup_channel: old fifo/udma reg 0x%x\n",
2894 	    acer_fifo_udma), DEBUG_PROBE);
2895 	/* setup DMA if needed */
2896 	pciide_channel_dma_setup(cp);
2897 
2898 	for (drive = 0; drive < 2; drive++) {
2899 		drvp = &chp->ch_drive[drive];
2900 		/* If no drive, skip */
2901 		if ((drvp->drive_flags & DRIVE) == 0)
2902 			continue;
2903 		WDCDEBUG_PRINT(("acer_setup_channel: old timings reg for "
2904 		    "channel %d drive %d 0x%x\n", chp->channel, drive,
2905 		    pciide_pci_read(sc->sc_pc, sc->sc_tag,
2906 		    ACER_IDETIM(chp->channel, drive))), DEBUG_PROBE);
2907 		/* clear FIFO/DMA mode */
2908 		acer_fifo_udma &= ~(ACER_FTH_OPL(chp->channel, drive, 0x3) |
2909 		    ACER_UDMA_EN(chp->channel, drive) |
2910 		    ACER_UDMA_TIM(chp->channel, drive, 0x7));
2911 
2912 		/* add timing values, setup DMA if needed */
2913 		if ((drvp->drive_flags & DRIVE_DMA) == 0 &&
2914 		    (drvp->drive_flags & DRIVE_UDMA) == 0) {
2915 			acer_fifo_udma |=
2916 			    ACER_FTH_OPL(chp->channel, drive, 0x1);
2917 			goto pio;
2918 		}
2919 
2920 		acer_fifo_udma |= ACER_FTH_OPL(chp->channel, drive, 0x2);
2921 		if (drvp->drive_flags & DRIVE_UDMA) {
2922 			/* use Ultra/DMA */
2923 			drvp->drive_flags &= ~DRIVE_DMA;
2924 			acer_fifo_udma |= ACER_UDMA_EN(chp->channel, drive);
2925 			acer_fifo_udma |=
2926 			    ACER_UDMA_TIM(chp->channel, drive,
2927 				acer_udma[drvp->UDMA_mode]);
2928 		} else {
2929 			/*
2930 			 * use Multiword DMA
2931 			 * Timings will be used for both PIO and DMA,
2932 			 * so adjust DMA mode if needed
2933 			 */
2934 			if (drvp->PIO_mode > (drvp->DMA_mode + 2))
2935 				drvp->PIO_mode = drvp->DMA_mode + 2;
2936 			if (drvp->DMA_mode + 2 > (drvp->PIO_mode))
2937 				drvp->DMA_mode = (drvp->PIO_mode > 2) ?
2938 				    drvp->PIO_mode - 2 : 0;
2939 			if (drvp->DMA_mode == 0)
2940 				drvp->PIO_mode = 0;
2941 		}
2942 		idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
2943 pio:		pciide_pci_write(sc->sc_pc, sc->sc_tag,
2944 		    ACER_IDETIM(chp->channel, drive),
2945 		    acer_pio[drvp->PIO_mode]);
2946 	}
2947 	WDCDEBUG_PRINT(("acer_setup_channel: new fifo/udma reg 0x%x\n",
2948 	    acer_fifo_udma), DEBUG_PROBE);
2949 	pci_conf_write(sc->sc_pc, sc->sc_tag, ACER_FTH_UDMA, acer_fifo_udma);
2950 	if (idedma_ctl != 0) {
2951 		/* Add software bits in status register */
2952 		bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
2953 		    IDEDMA_CTL, idedma_ctl);
2954 	}
2955 	pciide_print_modes(cp);
2956 }
2957 
2958 int
2959 acer_pci_intr(arg)
2960 	void *arg;
2961 {
2962 	struct pciide_softc *sc = arg;
2963 	struct pciide_channel *cp;
2964 	struct channel_softc *wdc_cp;
2965 	int i, rv, crv;
2966 	u_int32_t chids;
2967 
2968 	rv = 0;
2969 	chids = pciide_pci_read(sc->sc_pc, sc->sc_tag, ACER_CHIDS);
2970 	for (i = 0; i < sc->sc_wdcdev.nchannels; i++) {
2971 		cp = &sc->pciide_channels[i];
2972 		wdc_cp = &cp->wdc_channel;
2973 		/* If a compat channel skip. */
2974 		if (cp->compat)
2975 			continue;
2976 		if (chids & ACER_CHIDS_INT(i)) {
2977 			crv = wdcintr(wdc_cp);
2978 			if (crv == 0)
2979 				printf("%s:%d: bogus intr\n",
2980 				    sc->sc_wdcdev.sc_dev.dv_xname, i);
2981 			else
2982 				rv = 1;
2983 		}
2984 	}
2985 	return rv;
2986 }
2987 
2988 void
2989 hpt_chip_map(sc, pa)
2990 	struct pciide_softc *sc;
2991 	struct pci_attach_args *pa;
2992 {
2993 	struct pciide_channel *cp;
2994 	int i, compatchan, revision;
2995 	pcireg_t interface;
2996 	bus_size_t cmdsize, ctlsize;
2997 
2998 	if (pciide_chipen(sc, pa) == 0)
2999 		return;
3000 	revision = PCI_REVISION(pa->pa_class);
3001 
3002 	/*
3003 	 * when the chip is in native mode it identifies itself as a
3004 	 * 'misc mass storage'. Fake interface in this case.
3005 	 * 'misc mass storage'. Fake the interface in this case.
3006 	if (PCI_SUBCLASS(pa->pa_class) == PCI_SUBCLASS_MASS_STORAGE_IDE) {
3007 		interface = PCI_INTERFACE(pa->pa_class);
3008 	} else {
3009 		interface = PCIIDE_INTERFACE_BUS_MASTER_DMA |
3010 		    PCIIDE_INTERFACE_PCI(0);
3011 		if (revision == HPT370_REV)
3012 			interface |= PCIIDE_INTERFACE_PCI(1);
3013 	}
3014 
3015 	printf("%s: bus-master DMA support present",
3016 		sc->sc_wdcdev.sc_dev.dv_xname);
3017 	pciide_mapreg_dma(sc, pa);
3018 	printf("\n");
3019 	sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
3020 	    WDC_CAPABILITY_MODE;
3021 	if (sc->sc_dma_ok) {
3022 		sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_UDMA;
3023 		sc->sc_wdcdev.cap |= WDC_CAPABILITY_IRQACK;
3024 		sc->sc_wdcdev.irqack = pciide_irqack;
3025 	}
3026 	sc->sc_wdcdev.PIO_cap = 4;
3027 	sc->sc_wdcdev.DMA_cap = 2;
3028 
3029 	sc->sc_wdcdev.set_modes = hpt_setup_channel;
3030 	sc->sc_wdcdev.channels = sc->wdc_chanarray;
3031 	if (revision == HPT366_REV) {
3032 		sc->sc_wdcdev.UDMA_cap = 4;
3033 		/*
3034 		 * The 366 has 2 PCI IDE functions, one for primary and one
3035 		 * for secondary. So we need to call pciide_mapregs_compat()
3036 		 * with the real channel
3037 		 */
3038 		if (pa->pa_function == 0) {
3039 			compatchan = 0;
3040 		} else if (pa->pa_function == 1) {
3041 			compatchan = 1;
3042 		} else {
3043 			printf("%s: unexpected PCI function %d\n",
3044 			    sc->sc_wdcdev.sc_dev.dv_xname, pa->pa_function);
3045 			return;
3046 		}
3047 		sc->sc_wdcdev.nchannels = 1;
3048 	} else {
3049 		sc->sc_wdcdev.nchannels = 2;
3050 		sc->sc_wdcdev.UDMA_cap = 5;
3051 	}
3052 	for (i = 0; i < sc->sc_wdcdev.nchannels; i++) {
3053 		cp = &sc->pciide_channels[i];
3054 		if (sc->sc_wdcdev.nchannels > 1) {
3055 			compatchan = i;
3056 			if ((pciide_pci_read(sc->sc_pc, sc->sc_tag,
3057 			   HPT370_CTRL1(i)) & HPT370_CTRL1_EN) == 0) {
3058 				printf("%s: %s channel ignored (disabled)\n",
3059 				    sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
3060 				continue;
3061 			}
3062 		}
3063 		if (pciide_chansetup(sc, i, interface) == 0)
3064 			continue;
3065 		if (interface & PCIIDE_INTERFACE_PCI(i)) {
3066 			cp->hw_ok = pciide_mapregs_native(pa, cp, &cmdsize,
3067 			    &ctlsize, hpt_pci_intr);
3068 		} else {
3069 			cp->hw_ok = pciide_mapregs_compat(pa, cp, compatchan,
3070 			    &cmdsize, &ctlsize);
3071 		}
3072 		if (cp->hw_ok == 0)
3073 			return;
3074 		cp->wdc_channel.data32iot = cp->wdc_channel.cmd_iot;
3075 		cp->wdc_channel.data32ioh = cp->wdc_channel.cmd_ioh;
3076 		wdcattach(&cp->wdc_channel);
3077 		hpt_setup_channel(&cp->wdc_channel);
3078 	}
3079 	if (revision == HPT370_REV) {
3080 		/*
3081 		 * HPT370_REV has a bit to disable interrupts; make sure
3082 		 * it is cleared.
3083 		 */
3084 		pciide_pci_write(sc->sc_pc, sc->sc_tag, HPT_CSEL,
3085 		    pciide_pci_read(sc->sc_pc, sc->sc_tag, HPT_CSEL) &
3086 		    ~HPT_CSEL_IRQDIS);
3087 	}
3088 	return;
3089 }
3090 
3091 void
3092 hpt_setup_channel(chp)
3093 	struct channel_softc *chp;
3094 {
3095 	struct ata_drive_datas *drvp;
3096 	int drive;
3097 	int cable;
3098 	u_int32_t before, after;
3099 	u_int32_t idedma_ctl;
3100 	struct pciide_channel *cp = (struct pciide_channel*)chp;
3101 	struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
3102 
3103 	cable = pciide_pci_read(sc->sc_pc, sc->sc_tag, HPT_CSEL);
3104 
3105 	/* setup DMA if needed */
3106 	pciide_channel_dma_setup(cp);
3107 
3108 	idedma_ctl = 0;
3109 
3110 	/* Per drive settings */
3111 	for (drive = 0; drive < 2; drive++) {
3112 		drvp = &chp->ch_drive[drive];
3113 		/* If no drive, skip */
3114 		if ((drvp->drive_flags & DRIVE) == 0)
3115 			continue;
3116 		before = pci_conf_read(sc->sc_pc, sc->sc_tag,
3117 					HPT_IDETIM(chp->channel, drive));
3118 
3119 		/* add timing values, setup DMA if needed */
3120 		if (drvp->drive_flags & DRIVE_UDMA) {
3121 			/* use Ultra/DMA */
3122 			drvp->drive_flags &= ~DRIVE_DMA;
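			/* cable detect says no Ultra/66-capable cable: cap at mode 2 */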
3123 			if ((cable & HPT_CSEL_CBLID(chp->channel)) != 0 &&
3124 			    drvp->UDMA_mode > 2)
3125 				drvp->UDMA_mode = 2;
3126 			after = (sc->sc_wdcdev.nchannels == 2) ?
3127 			    hpt370_udma[drvp->UDMA_mode] :
3128 			    hpt366_udma[drvp->UDMA_mode];
3129 			idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
3130 		} else if (drvp->drive_flags & DRIVE_DMA) {
3131 			/*
3132 			 * use Multiword DMA.
3133 			 * Timings will be used for both PIO and DMA, so adjust
3134 			 * DMA mode if needed
3135 			 */
3136 			if (drvp->PIO_mode >= 3 &&
3137 			    (drvp->DMA_mode + 2) > drvp->PIO_mode) {
3138 				drvp->DMA_mode = drvp->PIO_mode - 2;
3139 			}
3140 			after = (sc->sc_wdcdev.nchannels == 2) ?
3141 			    hpt370_dma[drvp->DMA_mode] :
3142 			    hpt366_dma[drvp->DMA_mode];
3143 			idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
3144 		} else {
3145 			/* PIO only */
3146 			after = (sc->sc_wdcdev.nchannels == 2) ?
3147 			    hpt370_pio[drvp->PIO_mode] :
3148 			    hpt366_pio[drvp->PIO_mode];
3149 		}
3150 		pci_conf_write(sc->sc_pc, sc->sc_tag,
3151 		    HPT_IDETIM(chp->channel, drive), after);
3152 		WDCDEBUG_PRINT(("%s: bus speed register set to 0x%08x "
3153 		    "(BIOS 0x%08x)\n", drvp->drv_softc->dv_xname,
3154 		    after, before), DEBUG_PROBE);
3155 	}
3156 	if (idedma_ctl != 0) {
3157 		/* Add software bits in status register */
3158 		bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
3159 		    IDEDMA_CTL, idedma_ctl);
3160 	}
3161 	pciide_print_modes(cp);
3162 }
3163 
3164 int
3165 hpt_pci_intr(arg)
3166 	void *arg;
3167 {
3168 	struct pciide_softc *sc = arg;
3169 	struct pciide_channel *cp;
3170 	struct channel_softc *wdc_cp;
3171 	int rv = 0;
3172 	int dmastat, i, crv;
3173 
3174 	for (i = 0; i < sc->sc_wdcdev.nchannels; i++) {
3175 		dmastat = bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh,
3176 		    IDEDMA_CTL + IDEDMA_SCH_OFFSET * i);
3177 		if ((dmastat & IDEDMA_CTL_INTR) == 0)
3178 			continue;
3179 		cp = &sc->pciide_channels[i];
3180 		wdc_cp = &cp->wdc_channel;
3181 		crv = wdcintr(wdc_cp);
3182 		if (crv == 0) {
3183 			printf("%s:%d: bogus intr\n",
3184 			    sc->sc_wdcdev.sc_dev.dv_xname, i);
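			/* unclaimed: clear the latched DMA interrupt bit ourselves */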
3185 			bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
3186 			    IDEDMA_CTL + IDEDMA_SCH_OFFSET * i, dmastat);
3187 		} else
3188 			rv = 1;
3189 	}
3190 	return rv;
3191 }
3192 
3193 
3194 /* Macros to test product */
3195 #define PDC_IS_262(sc)							\
3196 	((sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA66 ||	\
3197 	(sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA100 ||	\
3198 	(sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA100X)
3199 #define PDC_IS_265(sc)							\
3200 	((sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA100 ||	\
3201 	(sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA100X)
3202 
3203 void
3204 pdc202xx_chip_map(sc, pa)
3205 	struct pciide_softc *sc;
3206 	struct pci_attach_args *pa;
3207 {
3208 	struct pciide_channel *cp;
3209 	int channel;
3210 	pcireg_t interface, st, mode;
3211 	bus_size_t cmdsize, ctlsize;
3212 
3213 	st = pci_conf_read(sc->sc_pc, sc->sc_tag, PDC2xx_STATE);
3214 	WDCDEBUG_PRINT(("pdc202xx_setup_chip: controller state 0x%x\n", st),
3215 	    DEBUG_PROBE);
3216 	if (pciide_chipen(sc, pa) == 0)
3217 		return;
3218 
3219 	/* turn off RAID mode */
3220 	st &= ~PDC2xx_STATE_IDERAID;
3221 
3222 	/*
3223 	 * can't rely on the PCI_CLASS_REG content if the chip was in raid
3224 	 * mode. We have to fake the interface.
3225 	 */
3226 	interface = PCIIDE_INTERFACE_SETTABLE(0) | PCIIDE_INTERFACE_SETTABLE(1);
3227 	if (st & PDC2xx_STATE_NATIVE)
3228 		interface |= PCIIDE_INTERFACE_PCI(0) | PCIIDE_INTERFACE_PCI(1);
3229 
3230 	printf("%s: bus-master DMA support present",
3231 	    sc->sc_wdcdev.sc_dev.dv_xname);
3232 	pciide_mapreg_dma(sc, pa);
3233 	printf("\n");
3234 	sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
3235 	    WDC_CAPABILITY_MODE;
3236 	if (sc->sc_dma_ok) {
3237 		sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_UDMA;
3238 		sc->sc_wdcdev.cap |= WDC_CAPABILITY_IRQACK;
3239 		sc->sc_wdcdev.irqack = pciide_irqack;
3240 	}
3241 	sc->sc_wdcdev.PIO_cap = 4;
3242 	sc->sc_wdcdev.DMA_cap = 2;
3243 	if (PDC_IS_265(sc))
3244 		sc->sc_wdcdev.UDMA_cap = 5;
3245 	else if (PDC_IS_262(sc))
3246 		sc->sc_wdcdev.UDMA_cap = 4;
3247 	else
3248 		sc->sc_wdcdev.UDMA_cap = 2;
3249 	sc->sc_wdcdev.set_modes = pdc202xx_setup_channel;
3250 	sc->sc_wdcdev.channels = sc->wdc_chanarray;
3251 	sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
3252 
3253 	/* setup failsafe defaults */
3254 	mode = 0;
3255 	mode = PDC2xx_TIM_SET_PA(mode, pdc2xx_pa[0]);
3256 	mode = PDC2xx_TIM_SET_PB(mode, pdc2xx_pb[0]);
3257 	mode = PDC2xx_TIM_SET_MB(mode, pdc2xx_dma_mb[0]);
3258 	mode = PDC2xx_TIM_SET_MC(mode, pdc2xx_dma_mc[0]);
3259 	for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
3260 		WDCDEBUG_PRINT(("pdc202xx_setup_chip: channel %d drive 0 "
3261 		    "initial timings  0x%x, now 0x%x\n", channel,
3262 		    pci_conf_read(sc->sc_pc, sc->sc_tag,
3263 		    PDC2xx_TIM(channel, 0)), mode | PDC2xx_TIM_IORDYp),
3264 		    DEBUG_PROBE);
3265 		pci_conf_write(sc->sc_pc, sc->sc_tag, PDC2xx_TIM(channel, 0),
3266 		    mode | PDC2xx_TIM_IORDYp);
3267 		WDCDEBUG_PRINT(("pdc202xx_setup_chip: channel %d drive 1 "
3268 		    "initial timings  0x%x, now 0x%x\n", channel,
3269 		    pci_conf_read(sc->sc_pc, sc->sc_tag,
3270 		    PDC2xx_TIM(channel, 1)), mode), DEBUG_PROBE);
3271 		pci_conf_write(sc->sc_pc, sc->sc_tag, PDC2xx_TIM(channel, 1),
3272 		    mode);
3273 	}
3274 
3275 	mode = PDC2xx_SCR_DMA;
3276 	if (PDC_IS_262(sc)) {
3277 		mode = PDC2xx_SCR_SET_GEN(mode, PDC262_SCR_GEN_LAT);
3278 	} else {
3279 		/* the BIOS set it up this way */
3280 		mode = PDC2xx_SCR_SET_GEN(mode, 0x1);
3281 	}
3282 	mode = PDC2xx_SCR_SET_I2C(mode, 0x3); /* ditto */
3283 	mode = PDC2xx_SCR_SET_POLL(mode, 0x1); /* ditto */
3284 	WDCDEBUG_PRINT(("pdc202xx_setup_chip: initial SCR  0x%x, now 0x%x\n",
3285 	    bus_space_read_4(sc->sc_dma_iot, sc->sc_dma_ioh, PDC2xx_SCR), mode),
3286 	    DEBUG_PROBE);
3287 	bus_space_write_4(sc->sc_dma_iot, sc->sc_dma_ioh, PDC2xx_SCR, mode);
3288 
3289 	/* controller initial state register is OK even without BIOS */
3290 	/* Set DMA mode to IDE DMA compatibility */
3291 	mode = bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh, PDC2xx_PM);
3292 	WDCDEBUG_PRINT(("pdc202xx_setup_chip: primary mode 0x%x", mode ),
3293 	    DEBUG_PROBE);
3294 	bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh, PDC2xx_PM,
3295 	    mode | 0x1);
3296 	mode = bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh, PDC2xx_SM);
3297 	WDCDEBUG_PRINT((", secondary mode 0x%x\n", mode ), DEBUG_PROBE);
3298 	bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh, PDC2xx_SM,
3299 	    mode | 0x1);
3300 
3301 	for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
3302 		cp = &sc->pciide_channels[channel];
3303 		if (pciide_chansetup(sc, channel, interface) == 0)
3304 			continue;
3305 		if ((st & (PDC_IS_262(sc) ?
3306 		    PDC262_STATE_EN(channel):PDC246_STATE_EN(channel))) == 0) {
3307 			printf("%s: %s channel ignored (disabled)\n",
3308 			    sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
3309 			continue;
3310 		}
3311 		if (PDC_IS_265(sc))
3312 			pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize,
3313 			    pdc20265_pci_intr);
3314 		else
3315 			pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize,
3316 			    pdc202xx_pci_intr);
3317 		if (cp->hw_ok == 0)
3318 			continue;
3319 		if (pciide_chan_candisable(cp))
3320 			st &= ~(PDC_IS_262(sc) ?
3321 			    PDC262_STATE_EN(channel):PDC246_STATE_EN(channel));
3322 		pciide_map_compat_intr(pa, cp, channel, interface);
3323 		pdc202xx_setup_channel(&cp->wdc_channel);
3324 	}
3325 	WDCDEBUG_PRINT(("pdc202xx_setup_chip: new controller state 0x%x\n", st),
3326 	    DEBUG_PROBE);
3327 	pci_conf_write(sc->sc_pc, sc->sc_tag, PDC2xx_STATE, st);
3328 	return;
3329 }
3330 
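/*
 * Per-channel mode setup for the Promise controllers.  On the 262-class
 * parts this also handles the per-channel PDC262_U66 enable and an ATAPI
 * UDMA setting; for every drive the selected PIO/DMA/UDMA values are then
 * packed into one per-drive timing register.  As a rough sketch (assuming
 * a drive that negotiated PIO mode 4 and Ultra/DMA mode 2), the timing
 * word is composed from the pdc2xx_* tables like this:
 *
 *	mode = 0;
 *	mode = PDC2xx_TIM_SET_MB(mode, pdc2xx_udma_mb[2]);
 *	mode = PDC2xx_TIM_SET_MC(mode, pdc2xx_udma_mc[2]);
 *	mode = PDC2xx_TIM_SET_PA(mode, pdc2xx_pa[4]);
 *	mode = PDC2xx_TIM_SET_PB(mode, pdc2xx_pb[4]);
 *	mode |= PDC2xx_TIM_SYNC | PDC2xx_TIM_ERRDY;
 *	pci_conf_write(sc->sc_pc, sc->sc_tag,
 *	    PDC2xx_TIM(chp->channel, drive), mode);
 *
 * (PDC2xx_TIM_PRE and the IORDY bits are added conditionally below.)
 */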
3331 void
3332 pdc202xx_setup_channel(chp)
3333 	struct channel_softc *chp;
3334 {
3335 	struct ata_drive_datas *drvp;
3336 	int drive;
3337 	pcireg_t mode, st;
3338 	u_int32_t idedma_ctl, scr, atapi;
3339 	struct pciide_channel *cp = (struct pciide_channel*)chp;
3340 	struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
3341 	int channel = chp->channel;
3342 
3343 	/* setup DMA if needed */
3344 	pciide_channel_dma_setup(cp);
3345 
3346 	idedma_ctl = 0;
3347 	WDCDEBUG_PRINT(("pdc202xx_setup_channel %s: scr 0x%x\n",
3348 	    sc->sc_wdcdev.sc_dev.dv_xname,
3349 	    bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh, PDC262_U66)),
3350 	    DEBUG_PROBE);
3351 
3352 	/* Per channel settings */
3353 	if (PDC_IS_262(sc)) {
3354 		scr = bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh,
3355 		    PDC262_U66);
3356 		st = pci_conf_read(sc->sc_pc, sc->sc_tag, PDC2xx_STATE);
3357 		/* Trim UDMA mode */
3358 		if ((st & PDC262_STATE_80P(channel)) != 0 ||
3359 		    (chp->ch_drive[0].drive_flags & DRIVE_UDMA &&
3360 		    chp->ch_drive[0].UDMA_mode <= 2) ||
3361 		    (chp->ch_drive[1].drive_flags & DRIVE_UDMA &&
3362 		    chp->ch_drive[1].UDMA_mode <= 2)) {
3363 			if (chp->ch_drive[0].UDMA_mode > 2)
3364 				chp->ch_drive[0].UDMA_mode = 2;
3365 			if (chp->ch_drive[1].UDMA_mode > 2)
3366 				chp->ch_drive[1].UDMA_mode = 2;
3367 		}
3368 		/* Set U66 if needed */
3369 		if ((chp->ch_drive[0].drive_flags & DRIVE_UDMA &&
3370 		    chp->ch_drive[0].UDMA_mode > 2) ||
3371 		    (chp->ch_drive[1].drive_flags & DRIVE_UDMA &&
3372 		    chp->ch_drive[1].UDMA_mode > 2))
3373 			scr |= PDC262_U66_EN(channel);
3374 		else
3375 			scr &= ~PDC262_U66_EN(channel);
3376 		bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
3377 		    PDC262_U66, scr);
3378 		WDCDEBUG_PRINT(("pdc202xx_setup_channel %s:%d: ATAPI 0x%x\n",
3379 		    sc->sc_wdcdev.sc_dev.dv_xname, channel,
3380 		    bus_space_read_4(sc->sc_dma_iot, sc->sc_dma_ioh,
3381 		    PDC262_ATAPI(channel))), DEBUG_PROBE);
3382 		if (chp->ch_drive[0].drive_flags & DRIVE_ATAPI ||
3383 			chp->ch_drive[1].drive_flags & DRIVE_ATAPI) {
3384 			if (((chp->ch_drive[0].drive_flags & DRIVE_UDMA) &&
3385 			    !(chp->ch_drive[1].drive_flags & DRIVE_UDMA) &&
3386 			    (chp->ch_drive[1].drive_flags & DRIVE_DMA)) ||
3387 			    ((chp->ch_drive[1].drive_flags & DRIVE_UDMA) &&
3388 			    !(chp->ch_drive[0].drive_flags & DRIVE_UDMA) &&
3389 			    (chp->ch_drive[0].drive_flags & DRIVE_DMA)))
3390 				atapi = 0;
3391 			else
3392 				atapi = PDC262_ATAPI_UDMA;
3393 			bus_space_write_4(sc->sc_dma_iot, sc->sc_dma_ioh,
3394 			    PDC262_ATAPI(channel), atapi);
3395 		}
3396 	}
3397 	for (drive = 0; drive < 2; drive++) {
3398 		drvp = &chp->ch_drive[drive];
3399 		/* If no drive, skip */
3400 		if ((drvp->drive_flags & DRIVE) == 0)
3401 			continue;
3402 		mode = 0;
3403 		if (drvp->drive_flags & DRIVE_UDMA) {
3404 			/* use Ultra/DMA */
3405 			drvp->drive_flags &= ~DRIVE_DMA;
3406 			mode = PDC2xx_TIM_SET_MB(mode,
3407 			    pdc2xx_udma_mb[drvp->UDMA_mode]);
3408 			mode = PDC2xx_TIM_SET_MC(mode,
3409 			    pdc2xx_udma_mc[drvp->UDMA_mode]);
3411 			idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
3412 		} else if (drvp->drive_flags & DRIVE_DMA) {
3413 			mode = PDC2xx_TIM_SET_MB(mode,
3414 			    pdc2xx_dma_mb[drvp->DMA_mode]);
3415 			mode = PDC2xx_TIM_SET_MC(mode,
3416 			    pdc2xx_dma_mc[drvp->DMA_mode]);
3417 			idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
3418 		} else {
3419 			mode = PDC2xx_TIM_SET_MB(mode,
3420 			    pdc2xx_dma_mb[0]);
3421 			mode = PDC2xx_TIM_SET_MC(mode,
3422 			    pdc2xx_dma_mc[0]);
3423 		}
3424 		mode = PDC2xx_TIM_SET_PA(mode, pdc2xx_pa[drvp->PIO_mode]);
3425 		mode = PDC2xx_TIM_SET_PB(mode, pdc2xx_pb[drvp->PIO_mode]);
3426 		if (drvp->drive_flags & DRIVE_ATA)
3427 			mode |= PDC2xx_TIM_PRE;
3428 		mode |= PDC2xx_TIM_SYNC | PDC2xx_TIM_ERRDY;
3429 		if (drvp->PIO_mode >= 3) {
3430 			mode |= PDC2xx_TIM_IORDY;
3431 			if (drive == 0)
3432 				mode |= PDC2xx_TIM_IORDYp;
3433 		}
3434 		WDCDEBUG_PRINT(("pdc202xx_setup_channel: %s:%d:%d "
3435 		    "timings 0x%x\n",
3436 		    sc->sc_wdcdev.sc_dev.dv_xname,
3437 		    chp->channel, drive, mode), DEBUG_PROBE);
3438 		pci_conf_write(sc->sc_pc, sc->sc_tag,
3439 		    PDC2xx_TIM(chp->channel, drive), mode);
3440 	}
3441 	if (idedma_ctl != 0) {
3442 		/* Add software bits in status register */
3443 		bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
3444 		    IDEDMA_CTL, idedma_ctl);
3445 	}
3446 	pciide_print_modes(cp);
3447 }
3448 
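/*
 * Shared PCI interrupt handler for the Promise parts that are not handled
 * by pdc20265_pci_intr() below: one read of the SCR register tells which
 * channel(s) posted an interrupt, and wdcintr() is called for each flagged
 * native-mode channel (compat channels are skipped here).
 */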
3449 int
3450 pdc202xx_pci_intr(arg)
3451 	void *arg;
3452 {
3453 	struct pciide_softc *sc = arg;
3454 	struct pciide_channel *cp;
3455 	struct channel_softc *wdc_cp;
3456 	int i, rv, crv;
3457 	u_int32_t scr;
3458 
3459 	rv = 0;
3460 	scr = bus_space_read_4(sc->sc_dma_iot, sc->sc_dma_ioh, PDC2xx_SCR);
3461 	for (i = 0; i < sc->sc_wdcdev.nchannels; i++) {
3462 		cp = &sc->pciide_channels[i];
3463 		wdc_cp = &cp->wdc_channel;
3464 		/* If a compat channel skip. */
3465 		if (cp->compat)
3466 			continue;
3467 		if (scr & PDC2xx_SCR_INT(i)) {
3468 			crv = wdcintr(wdc_cp);
3469 			if (crv == 0)
3470 				printf("%s:%d: bogus intr (reg 0x%x)\n",
3471 				    sc->sc_wdcdev.sc_dev.dv_xname, i, scr);
3472 			else
3473 				rv = 1;
3474 		}
3475 	}
3476 	return rv;
3477 }
3478 
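/*
 * PCI interrupt handler used for the Ultra/100 parts (see PDC_IS_265):
 * instead of the SCR interrupt bits it checks IDEDMA_CTL_INTR in each
 * channel's bus-master status register; the comment in the loop body
 * explains why.
 */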
3479 int
3480 pdc20265_pci_intr(arg)
3481 	void *arg;
3482 {
3483 	struct pciide_softc *sc = arg;
3484 	struct pciide_channel *cp;
3485 	struct channel_softc *wdc_cp;
3486 	int i, rv, crv;
3487 	u_int32_t dmastat;
3488 
3489 	rv = 0;
3490 	for (i = 0; i < sc->sc_wdcdev.nchannels; i++) {
3491 		cp = &sc->pciide_channels[i];
3492 		wdc_cp = &cp->wdc_channel;
3493 		/* If a compat channel skip. */
3494 		if (cp->compat)
3495 			continue;
3496 		/*
3497 		 * The Ultra/100 seems to assert PDC2xx_SCR_INT spuriously;
3498 		 * however, it asserts INT in IDEDMA_CTL even for non-DMA ops.
3499 		 * So use that instead (it requires 2 register reads instead
3500 		 * of 1, but there is no other way to do it).
3501 		 */
3502 		dmastat = bus_space_read_1(sc->sc_dma_iot,
3503 		    sc->sc_dma_ioh, IDEDMA_CTL + IDEDMA_SCH_OFFSET * i);
3504 		if ((dmastat & IDEDMA_CTL_INTR) == 0)
3505 			continue;
3506 		crv = wdcintr(wdc_cp);
3507 		if (crv == 0)
3508 			printf("%s:%d: bogus intr\n",
3509 			    sc->sc_wdcdev.sc_dev.dv_xname, i);
3510 		else
3511 			rv = 1;
3512 	}
3513 	return rv;
3514 }
3515 
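/*
 * OPTi chip mapping.  The second channel may be strapped off via the
 * initialization control register (OPTI_INIT_CONTROL_CH2_DISABLE); the
 * generic pciide_pci_intr() handler is used for interrupts.
 */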
3516 void
3517 opti_chip_map(sc, pa)
3518 	struct pciide_softc *sc;
3519 	struct pci_attach_args *pa;
3520 {
3521 	struct pciide_channel *cp;
3522 	bus_size_t cmdsize, ctlsize;
3523 	pcireg_t interface;
3524 	u_int8_t init_ctrl;
3525 	int channel;
3526 
3527 	if (pciide_chipen(sc, pa) == 0)
3528 		return;
3529 	printf("%s: bus-master DMA support present",
3530 	    sc->sc_wdcdev.sc_dev.dv_xname);
3531 	pciide_mapreg_dma(sc, pa);
3532 	printf("\n");
3533 
3534 	sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
3535 	    WDC_CAPABILITY_MODE;
3536 	sc->sc_wdcdev.PIO_cap = 4;
3537 	if (sc->sc_dma_ok) {
3538 		sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_IRQACK;
3539 		sc->sc_wdcdev.irqack = pciide_irqack;
3540 		sc->sc_wdcdev.DMA_cap = 2;
3541 	}
3542 	sc->sc_wdcdev.set_modes = opti_setup_channel;
3543 
3544 	sc->sc_wdcdev.channels = sc->wdc_chanarray;
3545 	sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
3546 
3547 	init_ctrl = pciide_pci_read(sc->sc_pc, sc->sc_tag,
3548 	    OPTI_REG_INIT_CONTROL);
3549 
3550 	interface = PCI_INTERFACE(pa->pa_class);
3551 
3552 	for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
3553 		cp = &sc->pciide_channels[channel];
3554 		if (pciide_chansetup(sc, channel, interface) == 0)
3555 			continue;
3556 		if (channel == 1 &&
3557 		    (init_ctrl & OPTI_INIT_CONTROL_CH2_DISABLE) != 0) {
3558 			printf("%s: %s channel ignored (disabled)\n",
3559 			    sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
3560 			continue;
3561 		}
3562 		pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize,
3563 		    pciide_pci_intr);
3564 		if (cp->hw_ok == 0)
3565 			continue;
3566 		pciide_map_compat_intr(pa, cp, channel, interface);
3567 		if (cp->hw_ok == 0)
3568 			continue;
3569 		opti_setup_channel(&cp->wdc_channel);
3570 	}
3571 }
3572 
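/*
 * Per-channel timing setup for the OPTi controller.  The mode[] value
 * computed below is an index into the opti_tim_* tables: PIO modes map to
 * indices 0-4 and multiword DMA modes to DMA_mode + 5 (e.g. DMA mode 2
 * uses index 7), so the same tables supply address setup, pulse width and
 * recovery values for both transfer types, selected by the PCI bus speed
 * strap (spd).
 */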
3573 void
3574 opti_setup_channel(chp)
3575 	struct channel_softc *chp;
3576 {
3577 	struct ata_drive_datas *drvp;
3578 	struct pciide_channel *cp = (struct pciide_channel*)chp;
3579 	struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
3580 	int drive, spd;
3581 	int mode[2];
3582 	u_int8_t rv, mr;
3583 
3584 	/*
3585 	 * The `Delay' and `Address Setup Time' fields of the
3586 	 * Miscellaneous Register are always zero initially.
3587 	 */
3588 	mr = opti_read_config(chp, OPTI_REG_MISC) & ~OPTI_MISC_INDEX_MASK;
3589 	mr &= ~(OPTI_MISC_DELAY_MASK |
3590 		OPTI_MISC_ADDR_SETUP_MASK |
3591 		OPTI_MISC_INDEX_MASK);
3592 
3593 	/* Prime the control register before setting timing values */
3594 	opti_write_config(chp, OPTI_REG_CONTROL, OPTI_CONTROL_DISABLE);
3595 
3596 	/* Determine the clock rate of the PCI bus the chip is attached to */
3597 	spd = (int) opti_read_config(chp, OPTI_REG_STRAP);
3598 	spd &= OPTI_STRAP_PCI_SPEED_MASK;
3599 
3600 	/* setup DMA if needed */
3601 	pciide_channel_dma_setup(cp);
3602 
3603 	for (drive = 0; drive < 2; drive++) {
3604 		drvp = &chp->ch_drive[drive];
3605 		/* If no drive, skip */
3606 		if ((drvp->drive_flags & DRIVE) == 0) {
3607 			mode[drive] = -1;
3608 			continue;
3609 		}
3610 
3611 		if ((drvp->drive_flags & DRIVE_DMA)) {
3612 			/*
3613 			 * Timings will be used for both PIO and DMA,
3614 			 * so adjust DMA mode if needed
3615 			 */
3616 			if (drvp->PIO_mode > (drvp->DMA_mode + 2))
3617 				drvp->PIO_mode = drvp->DMA_mode + 2;
3618 			if (drvp->DMA_mode + 2 > (drvp->PIO_mode))
3619 				drvp->DMA_mode = (drvp->PIO_mode > 2) ?
3620 				    drvp->PIO_mode - 2 : 0;
3621 			if (drvp->DMA_mode == 0)
3622 				drvp->PIO_mode = 0;
3623 
3624 			mode[drive] = drvp->DMA_mode + 5;
3625 		} else
3626 			mode[drive] = drvp->PIO_mode;
3627 
3628 		if (drive && mode[0] >= 0 &&
3629 		    (opti_tim_as[spd][mode[0]] != opti_tim_as[spd][mode[1]])) {
3630 			/*
3631 			 * Can't have two drives using different values
3632 			 * for `Address Setup Time'.
3633 			 * Slow down the faster drive to compensate.
3634 			 */
3635 			int d = (opti_tim_as[spd][mode[0]] >
3636 				 opti_tim_as[spd][mode[1]]) ?  0 : 1;
3637 
3638 			mode[d] = mode[1-d];
3639 			chp->ch_drive[d].PIO_mode = chp->ch_drive[1-d].PIO_mode;
3640 			chp->ch_drive[d].DMA_mode = 0;
3641 			chp->ch_drive[d].drive_flags &= ~DRIVE_DMA;
3642 		}
3643 	}
3644 
3645 	for (drive = 0; drive < 2; drive++) {
3646 		int m;
3647 		if ((m = mode[drive]) < 0)
3648 			continue;
3649 
3650 		/* Set the Address Setup Time and select appropriate index */
3651 		rv = opti_tim_as[spd][m] << OPTI_MISC_ADDR_SETUP_SHIFT;
3652 		rv |= OPTI_MISC_INDEX(drive);
3653 		opti_write_config(chp, OPTI_REG_MISC, mr | rv);
3654 
3655 		/* Set the pulse width and recovery timing parameters */
3656 		rv  = opti_tim_cp[spd][m] << OPTI_PULSE_WIDTH_SHIFT;
3657 		rv |= opti_tim_rt[spd][m] << OPTI_RECOVERY_TIME_SHIFT;
3658 		opti_write_config(chp, OPTI_REG_READ_CYCLE_TIMING, rv);
3659 		opti_write_config(chp, OPTI_REG_WRITE_CYCLE_TIMING, rv);
3660 
3661 		/* Set the Enhanced Mode register appropriately */
3662 		rv = pciide_pci_read(sc->sc_pc, sc->sc_tag, OPTI_REG_ENH_MODE);
3663 		rv &= ~OPTI_ENH_MODE_MASK(chp->channel, drive);
3664 		rv |= OPTI_ENH_MODE(chp->channel, drive, opti_tim_em[m]);
3665 		pciide_pci_write(sc->sc_pc, sc->sc_tag, OPTI_REG_ENH_MODE, rv);
3666 	}
3667 
3668 	/* Finally, enable the timings */
3669 	opti_write_config(chp, OPTI_REG_CONTROL, OPTI_CONTROL_ENABLE);
3670 
3671 	pciide_print_modes(cp);
3672 }
3673 
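/*
 * The ATP850U uses a different register layout (ATP850_* vs ATP860_*)
 * and only supports up to Ultra/DMA mode 2, so most of the acard code
 * below branches on this macro.
 */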
3674 #define	ACARD_IS_850(sc)						\
3675 	((sc)->sc_pp->ide_product == PCI_PRODUCT_ACARD_ATP850U)
3676 
3677 void
3678 acard_chip_map(sc, pa)
3679 	struct pciide_softc *sc;
3680 	struct pci_attach_args *pa;
3681 {
3682 	struct pciide_channel *cp;
3683 	int i;
3684 	pcireg_t interface;
3685 	bus_size_t cmdsize, ctlsize;
3686 
3687 	if (pciide_chipen(sc, pa) == 0)
3688 		return;
3689 
3690 	/*
3691 	 * When the chip is in native mode it identifies itself as
3692 	 * 'misc mass storage' rather than IDE; fake the interface value then.
3693 	 */
3694 	if (PCI_SUBCLASS(pa->pa_class) == PCI_SUBCLASS_MASS_STORAGE_IDE) {
3695 		interface = PCI_INTERFACE(pa->pa_class);
3696 	} else {
3697 		interface = PCIIDE_INTERFACE_BUS_MASTER_DMA |
3698 		    PCIIDE_INTERFACE_PCI(0) | PCIIDE_INTERFACE_PCI(1);
3699 	}
3700 
3701 	printf("%s: bus-master DMA support present",
3702 	    sc->sc_wdcdev.sc_dev.dv_xname);
3703 	pciide_mapreg_dma(sc, pa);
3704 	printf("\n");
3705 	sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
3706 	    WDC_CAPABILITY_MODE;
3707 
3708 	if (sc->sc_dma_ok) {
3709 		sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_UDMA;
3710 		sc->sc_wdcdev.cap |= WDC_CAPABILITY_IRQACK;
3711 		sc->sc_wdcdev.irqack = pciide_irqack;
3712 	}
3713 	sc->sc_wdcdev.PIO_cap = 4;
3714 	sc->sc_wdcdev.DMA_cap = 2;
3715 	sc->sc_wdcdev.UDMA_cap = ACARD_IS_850(sc) ? 2 : 4;
3716 
3717 	sc->sc_wdcdev.set_modes = acard_setup_channel;
3718 	sc->sc_wdcdev.channels = sc->wdc_chanarray;
3719 	sc->sc_wdcdev.nchannels = 2;
3720 
3721 	for (i = 0; i < sc->sc_wdcdev.nchannels; i++) {
3722 		cp = &sc->pciide_channels[i];
3723 		if (pciide_chansetup(sc, i, interface) == 0)
3724 			continue;
3725 		if (interface & PCIIDE_INTERFACE_PCI(i)) {
3726 			cp->hw_ok = pciide_mapregs_native(pa, cp, &cmdsize,
3727 			    &ctlsize, pciide_pci_intr);
3728 		} else {
3729 			cp->hw_ok = pciide_mapregs_compat(pa, cp, i,
3730 			    &cmdsize, &ctlsize);
3731 		}
3732 		if (cp->hw_ok == 0)
3733 			return;
3734 		cp->wdc_channel.data32iot = cp->wdc_channel.cmd_iot;
3735 		cp->wdc_channel.data32ioh = cp->wdc_channel.cmd_ioh;
3736 		wdcattach(&cp->wdc_channel);
3737 		acard_setup_channel(&cp->wdc_channel);
3738 	}
3739 	if (!ACARD_IS_850(sc)) {
3740 		u_int32_t reg;
3741 		reg = pci_conf_read(sc->sc_pc, sc->sc_tag, ATP8x0_CTRL);
3742 		reg &= ~ATP860_CTRL_INT;
3743 		pci_conf_write(sc->sc_pc, sc->sc_tag, ATP8x0_CTRL, reg);
3744 	}
3745 }
3746 
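/*
 * Per-channel timing setup for the ACARD controllers.  Active/recovery
 * timings for both drives are accumulated in `idetime' and UDMA clock
 * settings in `udma_mode', and both words are written back at the end.
 * A rough sketch for an ATP860-class chip with a drive that negotiated
 * Ultra/DMA mode 4 (the mode value is illustrative only):
 *
 *	idetime |= ATP860_SETTIME(channel, drive,
 *	    acard_act_udma[4], acard_rec_udma[4]);
 *	udma_mode |= ATP860_UDMA_MODE(channel, drive, acard_udma_conf[4]);
 *	pci_conf_write(sc->sc_pc, sc->sc_tag, ATP860_IDETIME, idetime);
 *	pci_conf_write(sc->sc_pc, sc->sc_tag, ATP860_UDMA, udma_mode);
 */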
3747 void
3748 acard_setup_channel(chp)
3749 	struct channel_softc *chp;
3750 {
3751 	struct ata_drive_datas *drvp;
3752 	struct pciide_channel *cp = (struct pciide_channel*)chp;
3753 	struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
3754 	int channel = chp->channel;
3755 	int drive;
3756 	u_int32_t idetime, udma_mode;
3757 	u_int32_t idedma_ctl;
3758 
3759 	/* setup DMA if needed */
3760 	pciide_channel_dma_setup(cp);
3761 
3762 	if (ACARD_IS_850(sc)) {
3763 		idetime = 0;
3764 		udma_mode = pci_conf_read(sc->sc_pc, sc->sc_tag, ATP850_UDMA);
3765 		udma_mode &= ~ATP850_UDMA_MASK(channel);
3766 	} else {
3767 		idetime = pci_conf_read(sc->sc_pc, sc->sc_tag, ATP860_IDETIME);
3768 		idetime &= ~ATP860_SETTIME_MASK(channel);
3769 		udma_mode = pci_conf_read(sc->sc_pc, sc->sc_tag, ATP860_UDMA);
3770 		udma_mode &= ~ATP860_UDMA_MASK(channel);
3771 	}
3772 
3773 	idedma_ctl = 0;
3774 
3775 	/* Per drive settings */
3776 	for (drive = 0; drive < 2; drive++) {
3777 		drvp = &chp->ch_drive[drive];
3778 		/* If no drive, skip */
3779 		if ((drvp->drive_flags & DRIVE) == 0)
3780 			continue;
3781 		/* add timing values, setup DMA if needed */
3782 		if ((chp->wdc->cap & WDC_CAPABILITY_UDMA) &&
3783 		    (drvp->drive_flags & DRIVE_UDMA)) {
3784 			/* use Ultra/DMA */
3785 			if (ACARD_IS_850(sc)) {
3786 				idetime |= ATP850_SETTIME(drive,
3787 				    acard_act_udma[drvp->UDMA_mode],
3788 				    acard_rec_udma[drvp->UDMA_mode]);
3789 				udma_mode |= ATP850_UDMA_MODE(channel, drive,
3790 				    acard_udma_conf[drvp->UDMA_mode]);
3791 			} else {
3792 				idetime |= ATP860_SETTIME(channel, drive,
3793 				    acard_act_udma[drvp->UDMA_mode],
3794 				    acard_rec_udma[drvp->UDMA_mode]);
3795 				udma_mode |= ATP860_UDMA_MODE(channel, drive,
3796 				    acard_udma_conf[drvp->UDMA_mode]);
3797 			}
3798 			idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
3799 		} else if ((chp->wdc->cap & WDC_CAPABILITY_DMA) &&
3800 		    (drvp->drive_flags & DRIVE_DMA)) {
3801 			/* use Multiword DMA */
3802 			drvp->drive_flags &= ~DRIVE_UDMA;
3803 			if (ACARD_IS_850(sc)) {
3804 				idetime |= ATP850_SETTIME(drive,
3805 				    acard_act_dma[drvp->DMA_mode],
3806 				    acard_rec_dma[drvp->DMA_mode]);
3807 			} else {
3808 				idetime |= ATP860_SETTIME(channel, drive,
3809 				    acard_act_dma[drvp->DMA_mode],
3810 				    acard_rec_dma[drvp->DMA_mode]);
3811 			}
3812 			idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
3813 		} else {
3814 			/* PIO only */
3815 			drvp->drive_flags &= ~(DRIVE_UDMA | DRIVE_DMA);
3816 			if (ACARD_IS_850(sc)) {
3817 				idetime |= ATP850_SETTIME(drive,
3818 				    acard_act_pio[drvp->PIO_mode],
3819 				    acard_rec_pio[drvp->PIO_mode]);
3820 			} else {
3821 				idetime |= ATP860_SETTIME(channel, drive,
3822 				    acard_act_pio[drvp->PIO_mode],
3823 				    acard_rec_pio[drvp->PIO_mode]);
3824 			}
3825 			pci_conf_write(sc->sc_pc, sc->sc_tag, ATP8x0_CTRL,
3826 			    pci_conf_read(sc->sc_pc, sc->sc_tag, ATP8x0_CTRL)
3827 			    | ATP8x0_CTRL_EN(channel));
3828 		}
3829 	}
3830 
3831 	if (idedma_ctl != 0) {
3832 		/* Add software bits in status register */
3833 		bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
3834 		    IDEDMA_CTL + IDEDMA_SCH_OFFSET * channel, idedma_ctl);
3835 	}
3836 	pciide_print_modes(cp);
3837 
3838 	if (ACARD_IS_850(sc)) {
3839 		pci_conf_write(sc->sc_pc, sc->sc_tag,
3840 		    ATP850_IDETIME(channel), idetime);
3841 		pci_conf_write(sc->sc_pc, sc->sc_tag, ATP850_UDMA, udma_mode);
3842 	} else {
3843 		pci_conf_write(sc->sc_pc, sc->sc_tag, ATP860_IDETIME, idetime);
3844 		pci_conf_write(sc->sc_pc, sc->sc_tag, ATP860_UDMA, udma_mode);
3845 	}
3846 }
3847 
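/*
 * PCI interrupt handler for the ACARD controllers.  Each channel's
 * bus-master status is checked for IDEDMA_CTL_INTR; if the channel is not
 * waiting for an interrupt (WDCF_IRQ_WAIT clear), wdcintr() is still
 * called but its result is ignored and the status is acknowledged here;
 * otherwise the interrupt is dispatched to wdcintr() normally.
 */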
3848 int
3849 acard_pci_intr(arg)
3850 	void *arg;
3851 {
3852 	struct pciide_softc *sc = arg;
3853 	struct pciide_channel *cp;
3854 	struct channel_softc *wdc_cp;
3855 	int rv = 0;
3856 	int dmastat, i, crv;
3857 
3858 	for (i = 0; i < sc->sc_wdcdev.nchannels; i++) {
3859 		dmastat = bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh,
3860 		    IDEDMA_CTL + IDEDMA_SCH_OFFSET * i);
3861 		if ((dmastat & IDEDMA_CTL_INTR) == 0)
3862 			continue;
3863 		cp = &sc->pciide_channels[i];
3864 		wdc_cp = &cp->wdc_channel;
3865 		if ((wdc_cp->ch_flags & WDCF_IRQ_WAIT) == 0) {
3866 			(void)wdcintr(wdc_cp);
3867 			bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
3868 			    IDEDMA_CTL + IDEDMA_SCH_OFFSET * i, dmastat);
3869 			continue;
3870 		}
3871 		crv = wdcintr(wdc_cp);
3872 		if (crv == 0)
3873 			printf("%s:%d: bogus intr\n",
3874 			    sc->sc_wdcdev.sc_dev.dv_xname, i);
3875 		else if (crv == 1)
3876 			rv = 1;
3877 		else if (rv == 0)
3878 			rv = crv;
3879 	}
3880 	return rv;
3881 }
3882