xref: /netbsd-src/sys/dev/pci/pciide.c (revision 8a8f936f250a330d54f8a24ed0e92aadf9743a7b)
1 /*	$NetBSD: pciide.c,v 1.129 2001/09/24 20:03:47 bouyer Exp $	*/
2 
3 
4 /*
5  * Copyright (c) 1999, 2000, 2001 Manuel Bouyer.
6  *
7  * Redistribution and use in source and binary forms, with or without
8  * modification, are permitted provided that the following conditions
9  * are met:
10  * 1. Redistributions of source code must retain the above copyright
11  *    notice, this list of conditions and the following disclaimer.
12  * 2. Redistributions in binary form must reproduce the above copyright
13  *    notice, this list of conditions and the following disclaimer in the
14  *    documentation and/or other materials provided with the distribution.
15  * 3. All advertising materials mentioning features or use of this software
16  *    must display the following acknowledgement:
17  *	This product includes software developed by the University of
18  *	California, Berkeley and its contributors.
19  * 4. Neither the name of the University nor the names of its contributors
20  *    may be used to endorse or promote products derived from this software
21  *    without specific prior written permission.
22  *
23  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
24  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
25  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
26  * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
27  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
28  * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
29  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
30  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
31  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
32  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
33  *
34  */
35 
36 
37 /*
38  * Copyright (c) 1996, 1998 Christopher G. Demetriou.  All rights reserved.
39  *
40  * Redistribution and use in source and binary forms, with or without
41  * modification, are permitted provided that the following conditions
42  * are met:
43  * 1. Redistributions of source code must retain the above copyright
44  *    notice, this list of conditions and the following disclaimer.
45  * 2. Redistributions in binary form must reproduce the above copyright
46  *    notice, this list of conditions and the following disclaimer in the
47  *    documentation and/or other materials provided with the distribution.
48  * 3. All advertising materials mentioning features or use of this software
49  *    must display the following acknowledgement:
50  *      This product includes software developed by Christopher G. Demetriou
51  *	for the NetBSD Project.
52  * 4. The name of the author may not be used to endorse or promote products
53  *    derived from this software without specific prior written permission
54  *
55  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
56  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
57  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
58  * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
59  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
60  * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
61  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
62  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
63  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
64  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
65  */
66 
67 /*
68  * PCI IDE controller driver.
69  *
70  * Author: Christopher G. Demetriou, March 2, 1998 (derived from NetBSD
71  * sys/dev/pci/ppb.c, revision 1.16).
72  *
73  * See "PCI IDE Controller Specification, Revision 1.0 3/4/94" and
74  * "Programming Interface for Bus Master IDE Controller, Revision 1.0
75  * 5/16/94" from the PCI SIG.
76  *
77  */
78 
79 #ifndef WDCDEBUG
80 #define WDCDEBUG
81 #endif
82 
83 #define DEBUG_DMA   0x01
84 #define DEBUG_XFERS  0x02
85 #define DEBUG_FUNCS  0x08
86 #define DEBUG_PROBE  0x10
87 #ifdef WDCDEBUG
88 int wdcdebug_pciide_mask = 0;
89 #define WDCDEBUG_PRINT(args, level) \
90 	if (wdcdebug_pciide_mask & (level)) printf args
91 #else
92 #define WDCDEBUG_PRINT(args, level)
93 #endif
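/*
 * Debugging sketch (illustration only): wdcdebug_pciide_mask gates the
 * WDCDEBUG_PRINT() calls throughout this file.  To trace the probe and
 * DMA-table setup paths one could, for instance, set the mask from the
 * kernel debugger before the controller attaches:
 *
 *	wdcdebug_pciide_mask = DEBUG_PROBE | DEBUG_DMA;
 *
 * DEBUG_XFERS similarly enables the per-transfer traces below.
 */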
94 #include <sys/param.h>
95 #include <sys/systm.h>
96 #include <sys/device.h>
97 #include <sys/malloc.h>
98 
99 #include <uvm/uvm_extern.h>
100 
101 #include <machine/endian.h>
102 
103 #include <dev/pci/pcireg.h>
104 #include <dev/pci/pcivar.h>
105 #include <dev/pci/pcidevs.h>
106 #include <dev/pci/pciidereg.h>
107 #include <dev/pci/pciidevar.h>
108 #include <dev/pci/pciide_piix_reg.h>
109 #include <dev/pci/pciide_amd_reg.h>
110 #include <dev/pci/pciide_apollo_reg.h>
111 #include <dev/pci/pciide_cmd_reg.h>
112 #include <dev/pci/pciide_cy693_reg.h>
113 #include <dev/pci/pciide_sis_reg.h>
114 #include <dev/pci/pciide_acer_reg.h>
115 #include <dev/pci/pciide_pdc202xx_reg.h>
116 #include <dev/pci/pciide_opti_reg.h>
117 #include <dev/pci/pciide_hpt_reg.h>
118 #include <dev/pci/pciide_acard_reg.h>
119 #include <dev/pci/cy82c693var.h>
120 
121 #include "opt_pciide.h"
122 
123 /* inlines for reading/writing 8-bit PCI registers */
124 static __inline u_int8_t pciide_pci_read __P((pci_chipset_tag_t, pcitag_t,
125 					      int));
126 static __inline void pciide_pci_write __P((pci_chipset_tag_t, pcitag_t,
127 					   int, u_int8_t));
128 
129 static __inline u_int8_t
130 pciide_pci_read(pc, pa, reg)
131 	pci_chipset_tag_t pc;
132 	pcitag_t pa;
133 	int reg;
134 {
135 
136 	return (pci_conf_read(pc, pa, (reg & ~0x03)) >>
137 	    ((reg & 0x03) * 8) & 0xff);
138 }
139 
140 static __inline void
141 pciide_pci_write(pc, pa, reg, val)
142 	pci_chipset_tag_t pc;
143 	pcitag_t pa;
144 	int reg;
145 	u_int8_t val;
146 {
147 	pcireg_t pcival;
148 
149 	pcival = pci_conf_read(pc, pa, (reg & ~0x03));
150 	pcival &= ~(0xff << ((reg & 0x03) * 8));
151 	pcival |= (val << ((reg & 0x03) * 8));
152 	pci_conf_write(pc, pa, (reg & ~0x03), pcival);
153 }
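/*
 * Usage sketch (illustration only; SOME_CONF_REG and SOME_BIT are
 * placeholders, not real register definitions): chip-specific code uses
 * these helpers to update a single byte of PCI configuration space
 * without disturbing the rest of the 32-bit register it lives in:
 *
 *	u_int8_t v = pciide_pci_read(sc->sc_pc, sc->sc_tag, SOME_CONF_REG);
 *	pciide_pci_write(sc->sc_pc, sc->sc_tag, SOME_CONF_REG, v | SOME_BIT);
 */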
154 
155 void default_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
156 
157 void piix_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
158 void piix_setup_channel __P((struct channel_softc*));
159 void piix3_4_setup_channel __P((struct channel_softc*));
160 static u_int32_t piix_setup_idetim_timings __P((u_int8_t, u_int8_t, u_int8_t));
161 static u_int32_t piix_setup_idetim_drvs __P((struct ata_drive_datas*));
162 static u_int32_t piix_setup_sidetim_timings __P((u_int8_t, u_int8_t, u_int8_t));
163 
164 void amd7x6_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
165 void amd7x6_setup_channel __P((struct channel_softc*));
166 
167 void apollo_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
168 void apollo_setup_channel __P((struct channel_softc*));
169 
170 void cmd_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
171 void cmd0643_9_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
172 void cmd0643_9_setup_channel __P((struct channel_softc*));
173 void cmd_channel_map __P((struct pci_attach_args *,
174 			struct pciide_softc *, int));
175 int  cmd_pci_intr __P((void *));
176 void cmd646_9_irqack __P((struct channel_softc *));
177 
178 void cy693_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
179 void cy693_setup_channel __P((struct channel_softc*));
180 
181 void sis_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
182 void sis_setup_channel __P((struct channel_softc*));
183 
184 void acer_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
185 void acer_setup_channel __P((struct channel_softc*));
186 int  acer_pci_intr __P((void *));
187 int  acer_isabr_match __P(( struct pci_attach_args *));
188 
189 void pdc202xx_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
190 void pdc202xx_setup_channel __P((struct channel_softc*));
191 int  pdc202xx_pci_intr __P((void *));
192 int  pdc20265_pci_intr __P((void *));
193 
194 void opti_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
195 void opti_setup_channel __P((struct channel_softc*));
196 
197 void hpt_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
198 void hpt_setup_channel __P((struct channel_softc*));
199 int  hpt_pci_intr __P((void *));
200 
201 void acard_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
202 void acard_setup_channel __P((struct channel_softc*));
203 int  acard_pci_intr __P((void *));
204 
205 #ifdef PCIIDE_WINBOND_ENABLE
206 void winbond_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
207 #endif
208 
209 void pciide_channel_dma_setup __P((struct pciide_channel *));
210 int  pciide_dma_table_setup __P((struct pciide_softc*, int, int));
211 int  pciide_dma_init __P((void*, int, int, void *, size_t, int));
212 void pciide_dma_start __P((void*, int, int));
213 int  pciide_dma_finish __P((void*, int, int, int));
214 void pciide_irqack __P((struct channel_softc *));
215 void pciide_print_modes __P((struct pciide_channel *));
216 
217 struct pciide_product_desc {
218 	u_int32_t ide_product;
219 	int ide_flags;
220 	const char *ide_name;
221 	/* map and setup chip, probe drives */
222 	void (*chip_map) __P((struct pciide_softc*, struct pci_attach_args*));
223 };
224 
225 /* Flags for ide_flags */
226 #define IDE_PCI_CLASS_OVERRIDE	0x0001 /* accept even if class != pciide */
227 #define	IDE_16BIT_IOSPACE	0x0002 /* I/O space BARS ignore upper word */
228 
229 /* Default product description for devices not specifically known to this driver */
230 const struct pciide_product_desc default_product_desc = {
231 	0,
232 	0,
233 	"Generic PCI IDE controller",
234 	default_chip_map,
235 };
236 
237 const struct pciide_product_desc pciide_intel_products[] =  {
238 	{ PCI_PRODUCT_INTEL_82092AA,
239 	  0,
240 	  "Intel 82092AA IDE controller",
241 	  default_chip_map,
242 	},
243 	{ PCI_PRODUCT_INTEL_82371FB_IDE,
244 	  0,
245 	  "Intel 82371FB IDE controller (PIIX)",
246 	  piix_chip_map,
247 	},
248 	{ PCI_PRODUCT_INTEL_82371SB_IDE,
249 	  0,
250 	  "Intel 82371SB IDE Interface (PIIX3)",
251 	  piix_chip_map,
252 	},
253 	{ PCI_PRODUCT_INTEL_82371AB_IDE,
254 	  0,
255 	  "Intel 82371AB IDE controller (PIIX4)",
256 	  piix_chip_map,
257 	},
258 	{ PCI_PRODUCT_INTEL_82440MX_IDE,
259 	  0,
260 	  "Intel 82440MX IDE controller",
261 	  piix_chip_map
262 	},
263 	{ PCI_PRODUCT_INTEL_82801AA_IDE,
264 	  0,
265 	  "Intel 82801AA IDE Controller (ICH)",
266 	  piix_chip_map,
267 	},
268 	{ PCI_PRODUCT_INTEL_82801AB_IDE,
269 	  0,
270 	  "Intel 82801AB IDE Controller (ICH0)",
271 	  piix_chip_map,
272 	},
273 	{ PCI_PRODUCT_INTEL_82801BA_IDE,
274 	  0,
275 	  "Intel 82801BA IDE Controller (ICH2)",
276 	  piix_chip_map,
277 	},
278 	{ PCI_PRODUCT_INTEL_82801BAM_IDE,
279 	  0,
280 	  "Intel 82801BAM IDE Controller (ICH2)",
281 	  piix_chip_map,
282 	},
283 	{ 0,
284 	  0,
285 	  NULL,
286 	  NULL
287 	}
288 };
289 
290 const struct pciide_product_desc pciide_amd_products[] =  {
291 	{ PCI_PRODUCT_AMD_PBC756_IDE,
292 	  0,
293 	  "Advanced Micro Devices AMD756 IDE Controller",
294 	  amd7x6_chip_map
295 	},
296 	{ PCI_PRODUCT_AMD_PBC766_IDE,
297 	  0,
298 	  "Advanced Micro Devices AMD766 IDE Controller",
299 	  amd7x6_chip_map
300 	},
301 	{ 0,
302 	  0,
303 	  NULL,
304 	  NULL
305 	}
306 };
307 
308 const struct pciide_product_desc pciide_cmd_products[] =  {
309 	{ PCI_PRODUCT_CMDTECH_640,
310 	  0,
311 	  "CMD Technology PCI0640",
312 	  cmd_chip_map
313 	},
314 	{ PCI_PRODUCT_CMDTECH_643,
315 	  0,
316 	  "CMD Technology PCI0643",
317 	  cmd0643_9_chip_map,
318 	},
319 	{ PCI_PRODUCT_CMDTECH_646,
320 	  0,
321 	  "CMD Technology PCI0646",
322 	  cmd0643_9_chip_map,
323 	},
324 	{ PCI_PRODUCT_CMDTECH_648,
325 	  IDE_PCI_CLASS_OVERRIDE,
326 	  "CMD Technology PCI0648",
327 	  cmd0643_9_chip_map,
328 	},
329 	{ PCI_PRODUCT_CMDTECH_649,
330 	  IDE_PCI_CLASS_OVERRIDE,
331 	  "CMD Technology PCI0649",
332 	  cmd0643_9_chip_map,
333 	},
334 	{ 0,
335 	  0,
336 	  NULL,
337 	  NULL
338 	}
339 };
340 
341 const struct pciide_product_desc pciide_via_products[] =  {
342 	{ PCI_PRODUCT_VIATECH_VT82C586_IDE,
343 	  0,
344 	  NULL,
345 	  apollo_chip_map,
346 	 },
347 	{ PCI_PRODUCT_VIATECH_VT82C586A_IDE,
348 	  0,
349 	  NULL,
350 	  apollo_chip_map,
351 	},
352 	{ 0,
353 	  0,
354 	  NULL,
355 	  NULL
356 	}
357 };
358 
359 const struct pciide_product_desc pciide_cypress_products[] =  {
360 	{ PCI_PRODUCT_CONTAQ_82C693,
361 	  IDE_16BIT_IOSPACE,
362 	  "Cypress 82C693 IDE Controller",
363 	  cy693_chip_map,
364 	},
365 	{ 0,
366 	  0,
367 	  NULL,
368 	  NULL
369 	}
370 };
371 
372 const struct pciide_product_desc pciide_sis_products[] =  {
373 	{ PCI_PRODUCT_SIS_5597_IDE,
374 	  0,
375 	  "Silicon Integrated System 5597/5598 IDE controller",
376 	  sis_chip_map,
377 	},
378 	{ 0,
379 	  0,
380 	  NULL,
381 	  NULL
382 	}
383 };
384 
385 const struct pciide_product_desc pciide_acer_products[] =  {
386 	{ PCI_PRODUCT_ALI_M5229,
387 	  0,
388 	  "Acer Labs M5229 UDMA IDE Controller",
389 	  acer_chip_map,
390 	},
391 	{ 0,
392 	  0,
393 	  NULL,
394 	  NULL
395 	}
396 };
397 
398 const struct pciide_product_desc pciide_promise_products[] =  {
399 	{ PCI_PRODUCT_PROMISE_ULTRA33,
400 	  IDE_PCI_CLASS_OVERRIDE,
401 	  "Promise Ultra33/ATA Bus Master IDE Accelerator",
402 	  pdc202xx_chip_map,
403 	},
404 	{ PCI_PRODUCT_PROMISE_ULTRA66,
405 	  IDE_PCI_CLASS_OVERRIDE,
406 	  "Promise Ultra66/ATA Bus Master IDE Accelerator",
407 	  pdc202xx_chip_map,
408 	},
409 	{ PCI_PRODUCT_PROMISE_ULTRA100,
410 	  IDE_PCI_CLASS_OVERRIDE,
411 	  "Promise Ultra100/ATA Bus Master IDE Accelerator",
412 	  pdc202xx_chip_map,
413 	},
414 	{ PCI_PRODUCT_PROMISE_ULTRA100X,
415 	  IDE_PCI_CLASS_OVERRIDE,
416 	  "Promise Ultra100/ATA Bus Master IDE Accelerator",
417 	  pdc202xx_chip_map,
418 	},
419 	{ 0,
420 	  0,
421 	  NULL,
422 	  NULL
423 	}
424 };
425 
426 const struct pciide_product_desc pciide_opti_products[] =  {
427 	{ PCI_PRODUCT_OPTI_82C621,
428 	  0,
429 	  "OPTi 82c621 PCI IDE controller",
430 	  opti_chip_map,
431 	},
432 	{ PCI_PRODUCT_OPTI_82C568,
433 	  0,
434 	  "OPTi 82c568 (82c621 compatible) PCI IDE controller",
435 	  opti_chip_map,
436 	},
437 	{ PCI_PRODUCT_OPTI_82D568,
438 	  0,
439 	  "OPTi 82d568 (82c621 compatible) PCI IDE controller",
440 	  opti_chip_map,
441 	},
442 	{ 0,
443 	  0,
444 	  NULL,
445 	  NULL
446 	}
447 };
448 
449 const struct pciide_product_desc pciide_triones_products[] =  {
450 	{ PCI_PRODUCT_TRIONES_HPT366,
451 	  IDE_PCI_CLASS_OVERRIDE,
452 	  NULL,
453 	  hpt_chip_map,
454 	},
455 	{ 0,
456 	  0,
457 	  NULL,
458 	  NULL
459 	}
460 };
461 
462 const struct pciide_product_desc pciide_acard_products[] =  {
463 	{ PCI_PRODUCT_ACARD_ATP850U,
464 	  IDE_PCI_CLASS_OVERRIDE,
465 	  "Acard ATP850U Ultra33 IDE Controller",
466 	  acard_chip_map,
467 	},
468 	{ PCI_PRODUCT_ACARD_ATP860,
469 	  IDE_PCI_CLASS_OVERRIDE,
470 	  "Acard ATP860 Ultra66 IDE Controller",
471 	  acard_chip_map,
472 	},
473 	{ PCI_PRODUCT_ACARD_ATP860A,
474 	  IDE_PCI_CLASS_OVERRIDE,
475 	  "Acard ATP860-A Ultra66 IDE Controller",
476 	  acard_chip_map,
477 	},
478 	{ 0,
479 	  0,
480 	  NULL,
481 	  NULL
482 	}
483 };
484 
485 #ifdef PCIIDE_SERVERWORKS_ENABLE
486 const struct pciide_product_desc pciide_serverworks_products[] =  {
487 	{ PCI_PRODUCT_SERVERWORKS_IDE,
488 	  0,
489 	  "ServerWorks ROSB4 IDE Controller",
490 	  piix_chip_map,
491 	},
492 	{ 0,
493 	  0,
494 	  NULL,
495 	}
496 };
497 #endif
498 
499 #ifdef PCIIDE_WINBOND_ENABLE
500 const struct pciide_product_desc pciide_winbond_products[] =  {
501 	{ PCI_PRODUCT_WINBOND_W83C553F_1,
502 	  0,
503 	  "Winbond W83C553F IDE controller",
504 	  winbond_chip_map,
505 	},
506 	{ 0,
507 	  0,
508 	  NULL,
509 	}
510 };
511 #endif
512 
513 struct pciide_vendor_desc {
514 	u_int32_t ide_vendor;
515 	const struct pciide_product_desc *ide_products;
516 };
517 
518 const struct pciide_vendor_desc pciide_vendors[] = {
519 	{ PCI_VENDOR_INTEL, pciide_intel_products },
520 	{ PCI_VENDOR_CMDTECH, pciide_cmd_products },
521 	{ PCI_VENDOR_VIATECH, pciide_via_products },
522 	{ PCI_VENDOR_CONTAQ, pciide_cypress_products },
523 	{ PCI_VENDOR_SIS, pciide_sis_products },
524 	{ PCI_VENDOR_ALI, pciide_acer_products },
525 	{ PCI_VENDOR_PROMISE, pciide_promise_products },
526 	{ PCI_VENDOR_AMD, pciide_amd_products },
527 	{ PCI_VENDOR_OPTI, pciide_opti_products },
528 	{ PCI_VENDOR_TRIONES, pciide_triones_products },
529 	{ PCI_VENDOR_ACARD, pciide_acard_products },
530 #ifdef PCIIDE_SERVERWORKS_ENABLE
531 	{ PCI_VENDOR_SERVERWORKS, pciide_serverworks_products },
532 #endif
533 #ifdef PCIIDE_WINBOND_ENABLE
534 	{ PCI_VENDOR_WINBOND, pciide_winbond_products },
535 #endif
536 	{ 0, NULL }
537 };
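/*
 * Sketch of how a new controller would be hooked in (the FOO names are
 * placeholders): add a product table with the PCI product ID, any IDE_*
 * flags and a chip_map function, then reference it from pciide_vendors[]
 * above:
 *
 *	const struct pciide_product_desc pciide_foo_products[] = {
 *		{ PCI_PRODUCT_FOO_IDE, 0, "Foo IDE controller", foo_chip_map },
 *		{ 0, 0, NULL, NULL }
 *	};
 *	...
 *	{ PCI_VENDOR_FOO, pciide_foo_products },
 */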
538 
539 /* options passed via the 'flags' config keyword */
540 #define PCIIDE_OPTIONS_DMA	0x01
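/*
 * Kernel-config sketch (see pciide(4) for the authoritative syntax): a
 * configuration line along the lines of
 *
 *	pciide* at pci? dev ? function ? flags 0x0001
 *
 * sets cf_flags to PCIIDE_OPTIONS_DMA, which default_chip_map() checks
 * before enabling bus-master DMA on an otherwise unknown controller.
 */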
541 
542 int	pciide_match __P((struct device *, struct cfdata *, void *));
543 void	pciide_attach __P((struct device *, struct device *, void *));
544 
545 struct cfattach pciide_ca = {
546 	sizeof(struct pciide_softc), pciide_match, pciide_attach
547 };
548 int	pciide_chipen __P((struct pciide_softc *, struct pci_attach_args *));
549 int	pciide_mapregs_compat __P(( struct pci_attach_args *,
550 	    struct pciide_channel *, int, bus_size_t *, bus_size_t*));
551 int	pciide_mapregs_native __P((struct pci_attach_args *,
552 	    struct pciide_channel *, bus_size_t *, bus_size_t *,
553 	    int (*pci_intr) __P((void *))));
554 void	pciide_mapreg_dma __P((struct pciide_softc *,
555 	    struct pci_attach_args *));
556 int	pciide_chansetup __P((struct pciide_softc *, int, pcireg_t));
557 void	pciide_mapchan __P((struct pci_attach_args *,
558 	    struct pciide_channel *, pcireg_t, bus_size_t *, bus_size_t *,
559 	    int (*pci_intr) __P((void *))));
560 int	pciide_chan_candisable __P((struct pciide_channel *));
561 void	pciide_map_compat_intr __P(( struct pci_attach_args *,
562 	    struct pciide_channel *, int, int));
563 int	pciide_compat_intr __P((void *));
564 int	pciide_pci_intr __P((void *));
565 const struct pciide_product_desc* pciide_lookup_product __P((u_int32_t));
566 
567 const struct pciide_product_desc *
568 pciide_lookup_product(id)
569 	u_int32_t id;
570 {
571 	const struct pciide_product_desc *pp;
572 	const struct pciide_vendor_desc *vp;
573 
574 	for (vp = pciide_vendors; vp->ide_products != NULL; vp++)
575 		if (PCI_VENDOR(id) == vp->ide_vendor)
576 			break;
577 
578 	if ((pp = vp->ide_products) == NULL)
579 		return NULL;
580 
581 	for (; pp->chip_map != NULL; pp++)
582 		if (PCI_PRODUCT(id) == pp->ide_product)
583 			break;
584 
585 	if (pp->chip_map == NULL)
586 		return NULL;
587 	return pp;
588 }
589 
590 int
591 pciide_match(parent, match, aux)
592 	struct device *parent;
593 	struct cfdata *match;
594 	void *aux;
595 {
596 	struct pci_attach_args *pa = aux;
597 	const struct pciide_product_desc *pp;
598 
599 	/*
600 	 * Check the class register to see that it's a PCI IDE controller.
601 	 * If it is, we assume that we can deal with it; it _should_
602 	 * work in a standardized way...
603 	 */
604 	if (PCI_CLASS(pa->pa_class) == PCI_CLASS_MASS_STORAGE &&
605 	    PCI_SUBCLASS(pa->pa_class) == PCI_SUBCLASS_MASS_STORAGE_IDE) {
606 		return (1);
607 	}
608 
609 	/*
610 	 * Some controllers (e.g. Promise Ultra-33) don't claim to be PCI IDE
611 	 * controllers. Let's see if we can deal with them anyway.
612 	 */
613 	pp = pciide_lookup_product(pa->pa_id);
614 	if (pp && (pp->ide_flags & IDE_PCI_CLASS_OVERRIDE)) {
615 		return (1);
616 	}
617 
618 	return (0);
619 }
620 
621 void
622 pciide_attach(parent, self, aux)
623 	struct device *parent, *self;
624 	void *aux;
625 {
626 	struct pci_attach_args *pa = aux;
627 	pci_chipset_tag_t pc = pa->pa_pc;
628 	pcitag_t tag = pa->pa_tag;
629 	struct pciide_softc *sc = (struct pciide_softc *)self;
630 	pcireg_t csr;
631 	char devinfo[256];
632 	const char *displaydev;
633 
634 	sc->sc_pp = pciide_lookup_product(pa->pa_id);
635 	if (sc->sc_pp == NULL) {
636 		sc->sc_pp = &default_product_desc;
637 		pci_devinfo(pa->pa_id, pa->pa_class, 0, devinfo);
638 		displaydev = devinfo;
639 	} else
640 		displaydev = sc->sc_pp->ide_name;
641 
642 	/* if displaydev == NULL, printf is done in chip-specific map */
643 	if (displaydev)
644 		printf(": %s (rev. 0x%02x)\n", displaydev,
645 		    PCI_REVISION(pa->pa_class));
646 
647 	sc->sc_pc = pa->pa_pc;
648 	sc->sc_tag = pa->pa_tag;
649 #ifdef WDCDEBUG
650 	if (wdcdebug_pciide_mask & DEBUG_PROBE)
651 		pci_conf_print(sc->sc_pc, sc->sc_tag, NULL);
652 #endif
653 	sc->sc_pp->chip_map(sc, pa);
654 
655 	if (sc->sc_dma_ok) {
656 		csr = pci_conf_read(pc, tag, PCI_COMMAND_STATUS_REG);
657 		csr |= PCI_COMMAND_MASTER_ENABLE;
658 		pci_conf_write(pc, tag, PCI_COMMAND_STATUS_REG, csr);
659 	}
660 	WDCDEBUG_PRINT(("pciide: command/status register=%x\n",
661 	    pci_conf_read(pc, tag, PCI_COMMAND_STATUS_REG)), DEBUG_PROBE);
662 }
663 
664 /* tell whether the chip is enabled or not */
665 int
666 pciide_chipen(sc, pa)
667 	struct pciide_softc *sc;
668 	struct pci_attach_args *pa;
669 {
670 	pcireg_t csr;
671 	if ((pa->pa_flags & PCI_FLAGS_IO_ENABLED) == 0) {
672 		csr = pci_conf_read(sc->sc_pc, sc->sc_tag,
673 		    PCI_COMMAND_STATUS_REG);
674 		printf("%s: device disabled (at %s)\n",
675 		    sc->sc_wdcdev.sc_dev.dv_xname,
676 		    (csr & PCI_COMMAND_IO_ENABLE) == 0 ?
677 		    "device" : "bridge");
678 		return 0;
679 	}
680 	return 1;
681 }
682 
683 int
684 pciide_mapregs_compat(pa, cp, compatchan, cmdsizep, ctlsizep)
685 	struct pci_attach_args *pa;
686 	struct pciide_channel *cp;
687 	int compatchan;
688 	bus_size_t *cmdsizep, *ctlsizep;
689 {
690 	struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
691 	struct channel_softc *wdc_cp = &cp->wdc_channel;
692 
693 	cp->compat = 1;
694 	*cmdsizep = PCIIDE_COMPAT_CMD_SIZE;
695 	*ctlsizep = PCIIDE_COMPAT_CTL_SIZE;
696 
697 	wdc_cp->cmd_iot = pa->pa_iot;
698 	if (bus_space_map(wdc_cp->cmd_iot, PCIIDE_COMPAT_CMD_BASE(compatchan),
699 	    PCIIDE_COMPAT_CMD_SIZE, 0, &wdc_cp->cmd_ioh) != 0) {
700 		printf("%s: couldn't map %s channel cmd regs\n",
701 		    sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
702 		return (0);
703 	}
704 
705 	wdc_cp->ctl_iot = pa->pa_iot;
706 	if (bus_space_map(wdc_cp->ctl_iot, PCIIDE_COMPAT_CTL_BASE(compatchan),
707 	    PCIIDE_COMPAT_CTL_SIZE, 0, &wdc_cp->ctl_ioh) != 0) {
708 		printf("%s: couldn't map %s channel ctl regs\n",
709 		    sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
710 		bus_space_unmap(wdc_cp->cmd_iot, wdc_cp->cmd_ioh,
711 		    PCIIDE_COMPAT_CMD_SIZE);
712 		return (0);
713 	}
714 
715 	return (1);
716 }
717 
718 int
719 pciide_mapregs_native(pa, cp, cmdsizep, ctlsizep, pci_intr)
720 	struct pci_attach_args * pa;
721 	struct pciide_channel *cp;
722 	bus_size_t *cmdsizep, *ctlsizep;
723 	int (*pci_intr) __P((void *));
724 {
725 	struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
726 	struct channel_softc *wdc_cp = &cp->wdc_channel;
727 	const char *intrstr;
728 	pci_intr_handle_t intrhandle;
729 
730 	cp->compat = 0;
731 
732 	if (sc->sc_pci_ih == NULL) {
733 		if (pci_intr_map(pa, &intrhandle) != 0) {
734 			printf("%s: couldn't map native-PCI interrupt\n",
735 			    sc->sc_wdcdev.sc_dev.dv_xname);
736 			return 0;
737 		}
738 		intrstr = pci_intr_string(pa->pa_pc, intrhandle);
739 		sc->sc_pci_ih = pci_intr_establish(pa->pa_pc,
740 		    intrhandle, IPL_BIO, pci_intr, sc);
741 		if (sc->sc_pci_ih != NULL) {
742 			printf("%s: using %s for native-PCI interrupt\n",
743 			    sc->sc_wdcdev.sc_dev.dv_xname,
744 			    intrstr ? intrstr : "unknown interrupt");
745 		} else {
746 			printf("%s: couldn't establish native-PCI interrupt",
747 			    sc->sc_wdcdev.sc_dev.dv_xname);
748 			if (intrstr != NULL)
749 				printf(" at %s", intrstr);
750 			printf("\n");
751 			return 0;
752 		}
753 	}
754 	cp->ih = sc->sc_pci_ih;
755 	if (pci_mapreg_map(pa, PCIIDE_REG_CMD_BASE(wdc_cp->channel),
756 	    PCI_MAPREG_TYPE_IO, 0,
757 	    &wdc_cp->cmd_iot, &wdc_cp->cmd_ioh, NULL, cmdsizep) != 0) {
758 		printf("%s: couldn't map %s channel cmd regs\n",
759 		    sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
760 		return 0;
761 	}
762 
763 	if (pci_mapreg_map(pa, PCIIDE_REG_CTL_BASE(wdc_cp->channel),
764 	    PCI_MAPREG_TYPE_IO, 0,
765 	    &wdc_cp->ctl_iot, &cp->ctl_baseioh, NULL, ctlsizep) != 0) {
766 		printf("%s: couldn't map %s channel ctl regs\n",
767 		    sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
768 		bus_space_unmap(wdc_cp->cmd_iot, wdc_cp->cmd_ioh, *cmdsizep);
769 		return 0;
770 	}
771 	/*
772 	 * In native mode, 4 bytes of I/O space are mapped for the control
773 	 * register; the control register itself is at offset 2.  Pass the
774 	 * generic code a handle for only one byte at the right offset.
775 	 */
776 	if (bus_space_subregion(wdc_cp->ctl_iot, cp->ctl_baseioh, 2, 1,
777 	    &wdc_cp->ctl_ioh) != 0) {
778 		printf("%s: unable to subregion %s channel ctl regs\n",
779 		    sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
780 		bus_space_unmap(wdc_cp->cmd_iot, wdc_cp->cmd_ioh, *cmdsizep);
781 		bus_space_unmap(wdc_cp->ctl_iot, cp->ctl_baseioh, *ctlsizep);
782 		return 0;
783 	}
784 	return (1);
785 }
786 
787 void
788 pciide_mapreg_dma(sc, pa)
789 	struct pciide_softc *sc;
790 	struct pci_attach_args *pa;
791 {
792 	pcireg_t maptype;
793 	bus_addr_t addr;
794 
795 	/*
796 	 * Map DMA registers
797 	 *
798 	 * Note that sc_dma_ok is the right variable to test to see if
799 	 * DMA can be done.  If the interface doesn't support DMA,
800 	 * sc_dma_ok will never be non-zero.  If the DMA regs couldn't
801 	 * be mapped, it'll be zero.  I.e., sc_dma_ok will only be
802 	 * non-zero if the interface supports DMA and the registers
803 	 * could be mapped.
804 	 *
805 	 * XXX Note that despite the fact that the Bus Master IDE specs
806 	 * XXX say that "The bus master IDE function uses 16 bytes of IO
807 	 * XXX space," some controllers (at least the United
808 	 * XXX Microelectronics UM8886BF) place it in memory space.
809 	 */
810 	maptype = pci_mapreg_type(pa->pa_pc, pa->pa_tag,
811 	    PCIIDE_REG_BUS_MASTER_DMA);
812 
813 	switch (maptype) {
814 	case PCI_MAPREG_TYPE_IO:
815 		sc->sc_dma_ok = (pci_mapreg_info(pa->pa_pc, pa->pa_tag,
816 		    PCIIDE_REG_BUS_MASTER_DMA, PCI_MAPREG_TYPE_IO,
817 		    &addr, NULL, NULL) == 0);
818 		if (sc->sc_dma_ok == 0) {
819 			printf(", but unused (couldn't query registers)");
820 			break;
821 		}
822 		if ((sc->sc_pp->ide_flags & IDE_16BIT_IOSPACE)
823 		    && addr >= 0x10000) {
824 			sc->sc_dma_ok = 0;
825 			printf(", but unused (registers at unsafe address %#lx)", (unsigned long)addr);
826 			break;
827 		}
828 		/* FALLTHROUGH */
829 
830 	case PCI_MAPREG_MEM_TYPE_32BIT:
831 		sc->sc_dma_ok = (pci_mapreg_map(pa,
832 		    PCIIDE_REG_BUS_MASTER_DMA, maptype, 0,
833 		    &sc->sc_dma_iot, &sc->sc_dma_ioh, NULL, NULL) == 0);
834 		sc->sc_dmat = pa->pa_dmat;
835 		if (sc->sc_dma_ok == 0) {
836 			printf(", but unused (couldn't map registers)");
837 		} else {
838 			sc->sc_wdcdev.dma_arg = sc;
839 			sc->sc_wdcdev.dma_init = pciide_dma_init;
840 			sc->sc_wdcdev.dma_start = pciide_dma_start;
841 			sc->sc_wdcdev.dma_finish = pciide_dma_finish;
842 		}
843 		break;
844 
845 	default:
846 		sc->sc_dma_ok = 0;
847 		printf(", but unsupported register maptype (0x%x)", maptype);
848 	}
849 }
850 
851 int
852 pciide_compat_intr(arg)
853 	void *arg;
854 {
855 	struct pciide_channel *cp = arg;
856 
857 #ifdef DIAGNOSTIC
858 	/* should only be called for a compat channel */
859 	if (cp->compat == 0)
860 		panic("pciide compat intr called for non-compat chan %p", cp);
861 #endif
862 	return (wdcintr(&cp->wdc_channel));
863 }
864 
865 int
866 pciide_pci_intr(arg)
867 	void *arg;
868 {
869 	struct pciide_softc *sc = arg;
870 	struct pciide_channel *cp;
871 	struct channel_softc *wdc_cp;
872 	int i, rv, crv;
873 
874 	rv = 0;
875 	for (i = 0; i < sc->sc_wdcdev.nchannels; i++) {
876 		cp = &sc->pciide_channels[i];
877 		wdc_cp = &cp->wdc_channel;
878 
879 		/* If a compat channel, skip. */
880 		if (cp->compat)
881 			continue;
882 		/* If this channel is not waiting for an intr, skip. */
883 		if ((wdc_cp->ch_flags & WDCF_IRQ_WAIT) == 0)
884 			continue;
885 
886 		crv = wdcintr(wdc_cp);
887 		if (crv == 0)
888 			;		/* leave rv alone */
889 		else if (crv == 1)
890 			rv = 1;		/* claim the intr */
891 		else if (rv == 0)	/* crv should be -1 in this case */
892 			rv = crv;	/* if we've done no better, take it */
893 	}
894 	return (rv);
895 }
896 
897 void
898 pciide_channel_dma_setup(cp)
899 	struct pciide_channel *cp;
900 {
901 	int drive;
902 	struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
903 	struct ata_drive_datas *drvp;
904 
905 	for (drive = 0; drive < 2; drive++) {
906 		drvp = &cp->wdc_channel.ch_drive[drive];
907 		/* If no drive, skip */
908 		if ((drvp->drive_flags & DRIVE) == 0)
909 			continue;
910 		/* setup DMA if needed */
911 		if (((drvp->drive_flags & DRIVE_DMA) == 0 &&
912 		    (drvp->drive_flags & DRIVE_UDMA) == 0) ||
913 		    sc->sc_dma_ok == 0) {
914 			drvp->drive_flags &= ~(DRIVE_DMA | DRIVE_UDMA);
915 			continue;
916 		}
917 		if (pciide_dma_table_setup(sc, cp->wdc_channel.channel, drive)
918 		    != 0) {
919 			/* Abort DMA setup */
920 			drvp->drive_flags &= ~(DRIVE_DMA | DRIVE_UDMA);
921 			continue;
922 		}
923 	}
924 }
925 
926 int
927 pciide_dma_table_setup(sc, channel, drive)
928 	struct pciide_softc *sc;
929 	int channel, drive;
930 {
931 	bus_dma_segment_t seg;
932 	int error, rseg;
933 	const bus_size_t dma_table_size =
934 	    sizeof(struct idedma_table) * NIDEDMA_TABLES;
935 	struct pciide_dma_maps *dma_maps =
936 	    &sc->pciide_channels[channel].dma_maps[drive];
937 
938 	/* If table was already allocated, just return */
939 	if (dma_maps->dma_table)
940 		return 0;
941 
942 	/* Allocate memory for the DMA tables and map it */
943 	if ((error = bus_dmamem_alloc(sc->sc_dmat, dma_table_size,
944 	    IDEDMA_TBL_ALIGN, IDEDMA_TBL_ALIGN, &seg, 1, &rseg,
945 	    BUS_DMA_NOWAIT)) != 0) {
946 		printf("%s:%d: unable to allocate table DMA for "
947 		    "drive %d, error=%d\n", sc->sc_wdcdev.sc_dev.dv_xname,
948 		    channel, drive, error);
949 		return error;
950 	}
951 	if ((error = bus_dmamem_map(sc->sc_dmat, &seg, rseg,
952 	    dma_table_size,
953 	    (caddr_t *)&dma_maps->dma_table,
954 	    BUS_DMA_NOWAIT|BUS_DMA_COHERENT)) != 0) {
955 		printf("%s:%d: unable to map table DMA for "
956 		    "drive %d, error=%d\n", sc->sc_wdcdev.sc_dev.dv_xname,
957 		    channel, drive, error);
958 		return error;
959 	}
960 	WDCDEBUG_PRINT(("pciide_dma_table_setup: table at %p len %lu, "
961 	    "phy 0x%lx\n", dma_maps->dma_table, (u_long)dma_table_size,
962 	    (unsigned long)seg.ds_addr), DEBUG_PROBE);
963 
964 	/* Create and load table DMA map for this disk */
965 	if ((error = bus_dmamap_create(sc->sc_dmat, dma_table_size,
966 	    1, dma_table_size, IDEDMA_TBL_ALIGN, BUS_DMA_NOWAIT,
967 	    &dma_maps->dmamap_table)) != 0) {
968 		printf("%s:%d: unable to create table DMA map for "
969 		    "drive %d, error=%d\n", sc->sc_wdcdev.sc_dev.dv_xname,
970 		    channel, drive, error);
971 		return error;
972 	}
973 	if ((error = bus_dmamap_load(sc->sc_dmat,
974 	    dma_maps->dmamap_table,
975 	    dma_maps->dma_table,
976 	    dma_table_size, NULL, BUS_DMA_NOWAIT)) != 0) {
977 		printf("%s:%d: unable to load table DMA map for "
978 		    "drive %d, error=%d\n", sc->sc_wdcdev.sc_dev.dv_xname,
979 		    channel, drive, error);
980 		return error;
981 	}
982 	WDCDEBUG_PRINT(("pciide_dma_table_setup: phy addr of table 0x%lx\n",
983 	    (unsigned long)dma_maps->dmamap_table->dm_segs[0].ds_addr),
984 	    DEBUG_PROBE);
985 	/* Create a xfer DMA map for this drive */
986 	if ((error = bus_dmamap_create(sc->sc_dmat, IDEDMA_BYTE_COUNT_MAX,
987 	    NIDEDMA_TABLES, IDEDMA_BYTE_COUNT_MAX, IDEDMA_BYTE_COUNT_ALIGN,
988 	    BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW,
989 	    &dma_maps->dmamap_xfer)) != 0) {
990 		printf("%s:%d: unable to create xfer DMA map for "
991 		    "drive %d, error=%d\n", sc->sc_wdcdev.sc_dev.dv_xname,
992 		    channel, drive, error);
993 		return error;
994 	}
995 	return 0;
996 }
997 
998 int
999 pciide_dma_init(v, channel, drive, databuf, datalen, flags)
1000 	void *v;
1001 	int channel, drive;
1002 	void *databuf;
1003 	size_t datalen;
1004 	int flags;
1005 {
1006 	struct pciide_softc *sc = v;
1007 	int error, seg;
1008 	struct pciide_dma_maps *dma_maps =
1009 	    &sc->pciide_channels[channel].dma_maps[drive];
1010 
1011 	error = bus_dmamap_load(sc->sc_dmat,
1012 	    dma_maps->dmamap_xfer,
1013 	    databuf, datalen, NULL, BUS_DMA_NOWAIT | BUS_DMA_STREAMING |
1014 	    ((flags & WDC_DMA_READ) ? BUS_DMA_READ : BUS_DMA_WRITE));
1015 	if (error) {
1016 		printf("%s:%d: unable to load xfer DMA map for "
1017 		    "drive %d, error=%d\n", sc->sc_wdcdev.sc_dev.dv_xname,
1018 		    channel, drive, error);
1019 		return error;
1020 	}
1021 
1022 	bus_dmamap_sync(sc->sc_dmat, dma_maps->dmamap_xfer, 0,
1023 	    dma_maps->dmamap_xfer->dm_mapsize,
1024 	    (flags & WDC_DMA_READ) ?
1025 	    BUS_DMASYNC_PREREAD : BUS_DMASYNC_PREWRITE);
1026 
1027 	for (seg = 0; seg < dma_maps->dmamap_xfer->dm_nsegs; seg++) {
1028 #ifdef DIAGNOSTIC
1029 		/* A segment must not cross a 64k boundary */
1030 		{
1031 		u_long phys = dma_maps->dmamap_xfer->dm_segs[seg].ds_addr;
1032 		u_long len = dma_maps->dmamap_xfer->dm_segs[seg].ds_len;
1033 		if ((phys & ~IDEDMA_BYTE_COUNT_MASK) !=
1034 		    ((phys + len - 1) & ~IDEDMA_BYTE_COUNT_MASK)) {
1035 			printf("pciide_dma: segment %d physical addr 0x%lx"
1036 			    " len 0x%lx not properly aligned\n",
1037 			    seg, phys, len);
1038 			panic("pciide_dma: buf align");
1039 		}
1040 		}
1041 #endif
1042 		dma_maps->dma_table[seg].base_addr =
1043 		    htole32(dma_maps->dmamap_xfer->dm_segs[seg].ds_addr);
1044 		dma_maps->dma_table[seg].byte_count =
1045 		    htole32(dma_maps->dmamap_xfer->dm_segs[seg].ds_len &
1046 		    IDEDMA_BYTE_COUNT_MASK);
1047 		WDCDEBUG_PRINT(("\t seg %d len %d addr 0x%x\n",
1048 		   seg, le32toh(dma_maps->dma_table[seg].byte_count),
1049 		   le32toh(dma_maps->dma_table[seg].base_addr)), DEBUG_DMA);
1050 
1051 	}
1052 	dma_maps->dma_table[dma_maps->dmamap_xfer->dm_nsegs - 1].byte_count |=
1053 	    htole32(IDEDMA_BYTE_COUNT_EOT);
1054 
1055 	bus_dmamap_sync(sc->sc_dmat, dma_maps->dmamap_table, 0,
1056 	    dma_maps->dmamap_table->dm_mapsize,
1057 	    BUS_DMASYNC_PREWRITE);
1058 
1059 	/* Maps are ready. Start DMA function */
1060 #ifdef DIAGNOSTIC
1061 	if (dma_maps->dmamap_table->dm_segs[0].ds_addr & ~IDEDMA_TBL_MASK) {
1062 		printf("pciide_dma_init: addr 0x%lx not properly aligned\n",
1063 		    (u_long)dma_maps->dmamap_table->dm_segs[0].ds_addr);
1064 		panic("pciide_dma_init: table align");
1065 	}
1066 #endif
1067 
1068 	/* Clear status bits */
1069 	bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
1070 	    IDEDMA_CTL + IDEDMA_SCH_OFFSET * channel,
1071 	    bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh,
1072 		IDEDMA_CTL + IDEDMA_SCH_OFFSET * channel));
1073 	/* Write table addr */
1074 	bus_space_write_4(sc->sc_dma_iot, sc->sc_dma_ioh,
1075 	    IDEDMA_TBL + IDEDMA_SCH_OFFSET * channel,
1076 	    dma_maps->dmamap_table->dm_segs[0].ds_addr);
1077 	/* set read/write */
1078 	bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
1079 	    IDEDMA_CMD + IDEDMA_SCH_OFFSET * channel,
1080 	    (flags & WDC_DMA_READ) ? IDEDMA_CMD_WRITE : 0);
1081 	/* remember flags */
1082 	dma_maps->dma_flags = flags;
1083 	return 0;
1084 }
1085 
1086 void
1087 pciide_dma_start(v, channel, drive)
1088 	void *v;
1089 	int channel, drive;
1090 {
1091 	struct pciide_softc *sc = v;
1092 
1093 	WDCDEBUG_PRINT(("pciide_dma_start\n"),DEBUG_XFERS);
1094 	bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
1095 	    IDEDMA_CMD + IDEDMA_SCH_OFFSET * channel,
1096 	    bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh,
1097 		IDEDMA_CMD + IDEDMA_SCH_OFFSET * channel) | IDEDMA_CMD_START);
1098 }
1099 
1100 int
1101 pciide_dma_finish(v, channel, drive, force)
1102 	void *v;
1103 	int channel, drive;
1104 	int force;
1105 {
1106 	struct pciide_softc *sc = v;
1107 	u_int8_t status;
1108 	int error = 0;
1109 	struct pciide_dma_maps *dma_maps =
1110 	    &sc->pciide_channels[channel].dma_maps[drive];
1111 
1112 	status = bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh,
1113 	    IDEDMA_CTL + IDEDMA_SCH_OFFSET * channel);
1114 	WDCDEBUG_PRINT(("pciide_dma_finish: status 0x%x\n", status),
1115 	    DEBUG_XFERS);
1116 
1117 	if (force == 0 && (status & IDEDMA_CTL_INTR) == 0)
1118 		return WDC_DMAST_NOIRQ;
1119 
1120 	/* stop DMA channel */
1121 	bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
1122 	    IDEDMA_CMD + IDEDMA_SCH_OFFSET * channel,
1123 	    bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh,
1124 		IDEDMA_CMD + IDEDMA_SCH_OFFSET * channel) & ~IDEDMA_CMD_START);
1125 
1126 	/* Unload the map of the data buffer */
1127 	bus_dmamap_sync(sc->sc_dmat, dma_maps->dmamap_xfer, 0,
1128 	    dma_maps->dmamap_xfer->dm_mapsize,
1129 	    (dma_maps->dma_flags & WDC_DMA_READ) ?
1130 	    BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE);
1131 	bus_dmamap_unload(sc->sc_dmat, dma_maps->dmamap_xfer);
1132 
1133 	if ((status & IDEDMA_CTL_ERR) != 0) {
1134 		printf("%s:%d:%d: bus-master DMA error: status=0x%x\n",
1135 		    sc->sc_wdcdev.sc_dev.dv_xname, channel, drive, status);
1136 		error |= WDC_DMAST_ERR;
1137 	}
1138 
1139 	if ((status & IDEDMA_CTL_INTR) == 0) {
1140 		printf("%s:%d:%d: bus-master DMA error: missing interrupt, "
1141 		    "status=0x%x\n", sc->sc_wdcdev.sc_dev.dv_xname, channel,
1142 		    drive, status);
1143 		error |= WDC_DMAST_NOIRQ;
1144 	}
1145 
1146 	if ((status & IDEDMA_CTL_ACT) != 0) {
1147 		/* data underrun, may be a valid condition for ATAPI */
1148 		error |= WDC_DMAST_UNDER;
1149 	}
1150 	return error;
1151 }
1152 
1153 void
1154 pciide_irqack(chp)
1155 	struct channel_softc *chp;
1156 {
1157 	struct pciide_channel *cp = (struct pciide_channel*)chp;
1158 	struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
1159 
1160 	/* clear status bits in IDE DMA registers */
1161 	bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
1162 	    IDEDMA_CTL + IDEDMA_SCH_OFFSET * chp->channel,
1163 	    bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh,
1164 		IDEDMA_CTL + IDEDMA_SCH_OFFSET * chp->channel));
1165 }
1166 
1167 /* some common code used by several chip_map */
1168 int
1169 pciide_chansetup(sc, channel, interface)
1170 	struct pciide_softc *sc;
1171 	int channel;
1172 	pcireg_t interface;
1173 {
1174 	struct pciide_channel *cp = &sc->pciide_channels[channel];
1175 	sc->wdc_chanarray[channel] = &cp->wdc_channel;
1176 	cp->name = PCIIDE_CHANNEL_NAME(channel);
1177 	cp->wdc_channel.channel = channel;
1178 	cp->wdc_channel.wdc = &sc->sc_wdcdev;
1179 	cp->wdc_channel.ch_queue =
1180 	    malloc(sizeof(struct channel_queue), M_DEVBUF, M_NOWAIT);
1181 	if (cp->wdc_channel.ch_queue == NULL) {
1182 		printf("%s: %s channel: "
1183 		    "can't allocate memory for command queue\n",
1184 		    sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
1185 		return 0;
1186 	}
1187 	printf("%s: %s channel %s to %s mode\n",
1188 	    sc->sc_wdcdev.sc_dev.dv_xname, cp->name,
1189 	    (interface & PCIIDE_INTERFACE_SETTABLE(channel)) ?
1190 	    "configured" : "wired",
1191 	    (interface & PCIIDE_INTERFACE_PCI(channel)) ?
1192 	    "native-PCI" : "compatibility");
1193 	return 1;
1194 }
1195 
1196 /* some common code used by several chip channel_map */
1197 void
1198 pciide_mapchan(pa, cp, interface, cmdsizep, ctlsizep, pci_intr)
1199 	struct pci_attach_args *pa;
1200 	struct pciide_channel *cp;
1201 	pcireg_t interface;
1202 	bus_size_t *cmdsizep, *ctlsizep;
1203 	int (*pci_intr) __P((void *));
1204 {
1205 	struct channel_softc *wdc_cp = &cp->wdc_channel;
1206 
1207 	if (interface & PCIIDE_INTERFACE_PCI(wdc_cp->channel))
1208 		cp->hw_ok = pciide_mapregs_native(pa, cp, cmdsizep, ctlsizep,
1209 		    pci_intr);
1210 	else
1211 		cp->hw_ok = pciide_mapregs_compat(pa, cp,
1212 		    wdc_cp->channel, cmdsizep, ctlsizep);
1213 
1214 	if (cp->hw_ok == 0)
1215 		return;
1216 	wdc_cp->data32iot = wdc_cp->cmd_iot;
1217 	wdc_cp->data32ioh = wdc_cp->cmd_ioh;
1218 	wdcattach(wdc_cp);
1219 }
1220 
1221 /*
1222  * Generic code to determine whether a channel can be disabled.  Returns 1
1223  * if the channel can be disabled, 0 if not.
1224  */
1225 int
1226 pciide_chan_candisable(cp)
1227 	struct pciide_channel *cp;
1228 {
1229 	struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
1230 	struct channel_softc *wdc_cp = &cp->wdc_channel;
1231 
1232 	if ((wdc_cp->ch_drive[0].drive_flags & DRIVE) == 0 &&
1233 	    (wdc_cp->ch_drive[1].drive_flags & DRIVE) == 0) {
1234 		printf("%s: disabling %s channel (no drives)\n",
1235 		    sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
1236 		cp->hw_ok = 0;
1237 		return 1;
1238 	}
1239 	return 0;
1240 }
1241 
1242 /*
1243  * generic code to map the compat intr if hw_ok=1 and it is a compat channel.
1244  * Set hw_ok=0 on failure
1245  */
1246 void
1247 pciide_map_compat_intr(pa, cp, compatchan, interface)
1248 	struct pci_attach_args *pa;
1249 	struct pciide_channel *cp;
1250 	int compatchan, interface;
1251 {
1252 	struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
1253 	struct channel_softc *wdc_cp = &cp->wdc_channel;
1254 
1255 	if (cp->hw_ok == 0)
1256 		return;
1257 	if ((interface & PCIIDE_INTERFACE_PCI(wdc_cp->channel)) != 0)
1258 		return;
1259 
1260 #ifdef __HAVE_PCIIDE_MACHDEP_COMPAT_INTR_ESTABLISH
1261 	cp->ih = pciide_machdep_compat_intr_establish(&sc->sc_wdcdev.sc_dev,
1262 	    pa, compatchan, pciide_compat_intr, cp);
1263 	if (cp->ih == NULL) {
1264 #endif
1265 		printf("%s: no compatibility interrupt for use by %s "
1266 		    "channel\n", sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
1267 		cp->hw_ok = 0;
1268 #ifdef __HAVE_PCIIDE_MACHDEP_COMPAT_INTR_ESTABLISH
1269 	}
1270 #endif
1271 }
1272 
1273 void
1274 pciide_print_modes(cp)
1275 	struct pciide_channel *cp;
1276 {
1277 	wdc_print_modes(&cp->wdc_channel);
1278 }
1279 
1280 void
1281 default_chip_map(sc, pa)
1282 	struct pciide_softc *sc;
1283 	struct pci_attach_args *pa;
1284 {
1285 	struct pciide_channel *cp;
1286 	pcireg_t interface = PCI_INTERFACE(pa->pa_class);
1287 	pcireg_t csr;
1288 	int channel, drive;
1289 	struct ata_drive_datas *drvp;
1290 	u_int8_t idedma_ctl;
1291 	bus_size_t cmdsize, ctlsize;
1292 	char *failreason;
1293 
1294 	if (pciide_chipen(sc, pa) == 0)
1295 		return;
1296 
1297 	if (interface & PCIIDE_INTERFACE_BUS_MASTER_DMA) {
1298 		printf("%s: bus-master DMA support present",
1299 		    sc->sc_wdcdev.sc_dev.dv_xname);
1300 		if (sc->sc_pp == &default_product_desc &&
1301 		    (sc->sc_wdcdev.sc_dev.dv_cfdata->cf_flags &
1302 		    PCIIDE_OPTIONS_DMA) == 0) {
1303 			printf(", but unused (no driver support)");
1304 			sc->sc_dma_ok = 0;
1305 		} else {
1306 			pciide_mapreg_dma(sc, pa);
1307 			if (sc->sc_dma_ok != 0)
1308 				printf(", used without full driver "
1309 				    "support");
1310 		}
1311 	} else {
1312 		printf("%s: hardware does not support DMA",
1313 		    sc->sc_wdcdev.sc_dev.dv_xname);
1314 		sc->sc_dma_ok = 0;
1315 	}
1316 	printf("\n");
1317 	if (sc->sc_dma_ok) {
1318 		sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_IRQACK;
1319 		sc->sc_wdcdev.irqack = pciide_irqack;
1320 	}
1321 	sc->sc_wdcdev.PIO_cap = 0;
1322 	sc->sc_wdcdev.DMA_cap = 0;
1323 
1324 	sc->sc_wdcdev.channels = sc->wdc_chanarray;
1325 	sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
1326 	sc->sc_wdcdev.cap |= WDC_CAPABILITY_DATA16;
1327 
1328 	for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
1329 		cp = &sc->pciide_channels[channel];
1330 		if (pciide_chansetup(sc, channel, interface) == 0)
1331 			continue;
1332 		if (interface & PCIIDE_INTERFACE_PCI(channel)) {
1333 			cp->hw_ok = pciide_mapregs_native(pa, cp, &cmdsize,
1334 			    &ctlsize, pciide_pci_intr);
1335 		} else {
1336 			cp->hw_ok = pciide_mapregs_compat(pa, cp,
1337 			    channel, &cmdsize, &ctlsize);
1338 		}
1339 		if (cp->hw_ok == 0)
1340 			continue;
1341 		/*
1342 		 * Check to see if something appears to be there.
1343 		 */
1344 		failreason = NULL;
1345 		if (!wdcprobe(&cp->wdc_channel)) {
1346 			failreason = "not responding; disabled or no drives?";
1347 			goto next;
1348 		}
1349 		/*
1350 		 * Now, make sure it's actually attributable to this PCI IDE
1351 		 * channel by trying to access the channel again while the
1352 		 * PCI IDE controller's I/O space is disabled.  (If the
1353 		 * channel no longer appears to be there, it belongs to
1354 		 * this controller.)  YUCK!
1355 		 */
1356 		csr = pci_conf_read(sc->sc_pc, sc->sc_tag,
1357 		    PCI_COMMAND_STATUS_REG);
1358 		pci_conf_write(sc->sc_pc, sc->sc_tag, PCI_COMMAND_STATUS_REG,
1359 		    csr & ~PCI_COMMAND_IO_ENABLE);
1360 		if (wdcprobe(&cp->wdc_channel))
1361 			failreason = "other hardware responding at addresses";
1362 		pci_conf_write(sc->sc_pc, sc->sc_tag,
1363 		    PCI_COMMAND_STATUS_REG, csr);
1364 next:
1365 		if (failreason) {
1366 			printf("%s: %s channel ignored (%s)\n",
1367 			    sc->sc_wdcdev.sc_dev.dv_xname, cp->name,
1368 			    failreason);
1369 			cp->hw_ok = 0;
1370 			bus_space_unmap(cp->wdc_channel.cmd_iot,
1371 			    cp->wdc_channel.cmd_ioh, cmdsize);
1372 			bus_space_unmap(cp->wdc_channel.ctl_iot,
1373 			    cp->wdc_channel.ctl_ioh, ctlsize);
1374 		} else {
1375 			pciide_map_compat_intr(pa, cp, channel, interface);
1376 		}
1377 		if (cp->hw_ok) {
1378 			cp->wdc_channel.data32iot = cp->wdc_channel.cmd_iot;
1379 			cp->wdc_channel.data32ioh = cp->wdc_channel.cmd_ioh;
1380 			wdcattach(&cp->wdc_channel);
1381 		}
1382 	}
1383 
1384 	if (sc->sc_dma_ok == 0)
1385 		return;
1386 
1387 	/* Allocate DMA maps */
1388 	for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
1389 		idedma_ctl = 0;
1390 		cp = &sc->pciide_channels[channel];
1391 		for (drive = 0; drive < 2; drive++) {
1392 			drvp = &cp->wdc_channel.ch_drive[drive];
1393 			/* If no drive, skip */
1394 			if ((drvp->drive_flags & DRIVE) == 0)
1395 				continue;
1396 			if ((drvp->drive_flags & DRIVE_DMA) == 0)
1397 				continue;
1398 			if (pciide_dma_table_setup(sc, channel, drive) != 0) {
1399 				/* Abort DMA setup */
1400 				printf("%s:%d:%d: can't allocate DMA maps, "
1401 				    "using PIO transfers\n",
1402 				    sc->sc_wdcdev.sc_dev.dv_xname,
1403 				    channel, drive);
1404 				drvp->drive_flags &= ~DRIVE_DMA;
1405 			}
1406 			printf("%s:%d:%d: using DMA data transfers\n",
1407 			    sc->sc_wdcdev.sc_dev.dv_xname,
1408 			    channel, drive);
1409 			idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
1410 		}
1411 		if (idedma_ctl != 0) {
1412 			/* Add software bits in status register */
1413 			bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
1414 			    IDEDMA_CTL + (IDEDMA_SCH_OFFSET * channel),
1415 			    idedma_ctl);
1416 		}
1417 	}
1418 }
1419 
1420 void
1421 piix_chip_map(sc, pa)
1422 	struct pciide_softc *sc;
1423 	struct pci_attach_args *pa;
1424 {
1425 	struct pciide_channel *cp;
1426 	int channel;
1427 	u_int32_t idetim;
1428 	bus_size_t cmdsize, ctlsize;
1429 
1430 	if (pciide_chipen(sc, pa) == 0)
1431 		return;
1432 
1433 	printf("%s: bus-master DMA support present",
1434 	    sc->sc_wdcdev.sc_dev.dv_xname);
1435 	pciide_mapreg_dma(sc, pa);
1436 	printf("\n");
1437 	sc->sc_wdcdev.cap |= WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
1438 	    WDC_CAPABILITY_MODE;
1439 	if (sc->sc_dma_ok) {
1440 		sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_IRQACK;
1441 		sc->sc_wdcdev.irqack = pciide_irqack;
1442 		switch(sc->sc_pp->ide_product) {
1443 		case PCI_PRODUCT_INTEL_82371AB_IDE:
1444 		case PCI_PRODUCT_INTEL_82440MX_IDE:
1445 		case PCI_PRODUCT_INTEL_82801AA_IDE:
1446 		case PCI_PRODUCT_INTEL_82801AB_IDE:
1447 		case PCI_PRODUCT_INTEL_82801BA_IDE:
1448 		case PCI_PRODUCT_INTEL_82801BAM_IDE:
1449 			sc->sc_wdcdev.cap |= WDC_CAPABILITY_UDMA;
1450 		}
1451 	}
1452 	sc->sc_wdcdev.PIO_cap = 4;
1453 	sc->sc_wdcdev.DMA_cap = 2;
1454 	switch(sc->sc_pp->ide_product) {
1455 	case PCI_PRODUCT_INTEL_82801AA_IDE:
1456 		sc->sc_wdcdev.UDMA_cap = 4;
1457 		break;
1458 	case PCI_PRODUCT_INTEL_82801BA_IDE:
1459 	case PCI_PRODUCT_INTEL_82801BAM_IDE:
1460 		sc->sc_wdcdev.UDMA_cap = 5;
1461 		break;
1462 	default:
1463 		sc->sc_wdcdev.UDMA_cap = 2;
1464 	}
1465 	if (sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82371FB_IDE)
1466 		sc->sc_wdcdev.set_modes = piix_setup_channel;
1467 	else
1468 		sc->sc_wdcdev.set_modes = piix3_4_setup_channel;
1469 	sc->sc_wdcdev.channels = sc->wdc_chanarray;
1470 	sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
1471 
1472 	WDCDEBUG_PRINT(("piix_setup_chip: old idetim=0x%x",
1473 	    pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_IDETIM)),
1474 	    DEBUG_PROBE);
1475 	if (sc->sc_pp->ide_product != PCI_PRODUCT_INTEL_82371FB_IDE) {
1476 		WDCDEBUG_PRINT((", sidetim=0x%x",
1477 		    pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_SIDETIM)),
1478 		    DEBUG_PROBE);
1479 		if (sc->sc_wdcdev.cap & WDC_CAPABILITY_UDMA) {
1480 			WDCDEBUG_PRINT((", udmareg 0x%x",
1481 			    pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_UDMAREG)),
1482 			    DEBUG_PROBE);
1483 		}
1484 		if (sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801AA_IDE ||
1485 		    sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801AB_IDE ||
1486 		    sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801BA_IDE ||
1487 		    sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801BAM_IDE) {
1488 			WDCDEBUG_PRINT((", IDE_CONTROL 0x%x",
1489 			    pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_CONFIG)),
1490 			    DEBUG_PROBE);
1491 		}
1492 
1493 	}
1494 	WDCDEBUG_PRINT(("\n"), DEBUG_PROBE);
1495 
1496 	for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
1497 		cp = &sc->pciide_channels[channel];
1498 		/* PIIX is compat-only */
1499 		if (pciide_chansetup(sc, channel, 0) == 0)
1500 			continue;
1501 		idetim = pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_IDETIM);
1502 		if ((PIIX_IDETIM_READ(idetim, channel) &
1503 		    PIIX_IDETIM_IDE) == 0) {
1504 			printf("%s: %s channel ignored (disabled)\n",
1505 			    sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
1506 			continue;
1507 		}
1508 		/* PIIX are compat-only pciide devices */
1509 		pciide_mapchan(pa, cp, 0, &cmdsize, &ctlsize, pciide_pci_intr);
1510 		if (cp->hw_ok == 0)
1511 			continue;
1512 		if (pciide_chan_candisable(cp)) {
1513 			idetim = PIIX_IDETIM_CLEAR(idetim, PIIX_IDETIM_IDE,
1514 			    channel);
1515 			pci_conf_write(sc->sc_pc, sc->sc_tag, PIIX_IDETIM,
1516 			    idetim);
1517 		}
1518 		pciide_map_compat_intr(pa, cp, channel, 0);
1519 		if (cp->hw_ok == 0)
1520 			continue;
1521 		sc->sc_wdcdev.set_modes(&cp->wdc_channel);
1522 	}
1523 
1524 	WDCDEBUG_PRINT(("piix_setup_chip: idetim=0x%x",
1525 	    pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_IDETIM)),
1526 	    DEBUG_PROBE);
1527 	if (sc->sc_pp->ide_product != PCI_PRODUCT_INTEL_82371FB_IDE) {
1528 		WDCDEBUG_PRINT((", sidetim=0x%x",
1529 		    pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_SIDETIM)),
1530 		    DEBUG_PROBE);
1531 		if (sc->sc_wdcdev.cap & WDC_CAPABILITY_UDMA) {
1532 			WDCDEBUG_PRINT((", udmareg 0x%x",
1533 			    pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_UDMAREG)),
1534 			    DEBUG_PROBE);
1535 		}
1536 		if (sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801AA_IDE ||
1537 		    sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801AB_IDE ||
1538 		    sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801BA_IDE ||
1539 		    sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801BAM_IDE) {
1540 			WDCDEBUG_PRINT((", IDE_CONTROL 0x%x",
1541 			    pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_CONFIG)),
1542 			    DEBUG_PROBE);
1543 		}
1544 	}
1545 	WDCDEBUG_PRINT(("\n"), DEBUG_PROBE);
1546 }
1547 
1548 void
1549 piix_setup_channel(chp)
1550 	struct channel_softc *chp;
1551 {
1552 	u_int8_t mode[2], drive;
1553 	u_int32_t oidetim, idetim, idedma_ctl;
1554 	struct pciide_channel *cp = (struct pciide_channel*)chp;
1555 	struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
1556 	struct ata_drive_datas *drvp = cp->wdc_channel.ch_drive;
1557 
1558 	oidetim = pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_IDETIM);
1559 	idetim = PIIX_IDETIM_CLEAR(oidetim, 0xffff, chp->channel);
1560 	idedma_ctl = 0;
1561 
1562 	/* set up new idetim: Enable IDE registers decode */
1563 	idetim = PIIX_IDETIM_SET(idetim, PIIX_IDETIM_IDE,
1564 	    chp->channel);
1565 
1566 	/* setup DMA */
1567 	pciide_channel_dma_setup(cp);
1568 
1569 	/*
1570 	 * Here we have to mess with the drives' modes: the PIIX can't have
1571 	 * different timings for master and slave drives.
1572 	 * We need to find the best combination.
1573 	 */
1574 
1575 	/* If both drives support DMA, take the lower mode */
1576 	if ((drvp[0].drive_flags & DRIVE_DMA) &&
1577 	    (drvp[1].drive_flags & DRIVE_DMA)) {
1578 		mode[0] = mode[1] =
1579 		    min(drvp[0].DMA_mode, drvp[1].DMA_mode);
1580 		drvp[0].DMA_mode = mode[0];
1581 		drvp[1].DMA_mode = mode[1];
1582 		goto ok;
1583 	}
1584 	/*
1585 	 * If only one drive supports DMA, use its mode, and
1586 	 * put the other one in PIO mode 0 if its mode is not compatible.
1587 	 */
1588 	if (drvp[0].drive_flags & DRIVE_DMA) {
1589 		mode[0] = drvp[0].DMA_mode;
1590 		mode[1] = drvp[1].PIO_mode;
1591 		if (piix_isp_pio[mode[1]] != piix_isp_dma[mode[0]] ||
1592 		    piix_rtc_pio[mode[1]] != piix_rtc_dma[mode[0]])
1593 			mode[1] = drvp[1].PIO_mode = 0;
1594 		goto ok;
1595 	}
1596 	if (drvp[1].drive_flags & DRIVE_DMA) {
1597 		mode[1] = drvp[1].DMA_mode;
1598 		mode[0] = drvp[0].PIO_mode;
1599 		if (piix_isp_pio[mode[0]] != piix_isp_dma[mode[1]] ||
1600 		    piix_rtc_pio[mode[0]] != piix_rtc_dma[mode[1]])
1601 			mode[0] = drvp[0].PIO_mode = 0;
1602 		goto ok;
1603 	}
1604 	/*
1605 	 * If neither drive uses DMA, take the lower mode, unless
1606 	 * one of them is PIO mode < 2
1607 	 */
1608 	if (drvp[0].PIO_mode < 2) {
1609 		mode[0] = drvp[0].PIO_mode = 0;
1610 		mode[1] = drvp[1].PIO_mode;
1611 	} else if (drvp[1].PIO_mode < 2) {
1612 		mode[1] = drvp[1].PIO_mode = 0;
1613 		mode[0] = drvp[0].PIO_mode;
1614 	} else {
1615 		mode[0] = mode[1] =
1616 		    min(drvp[1].PIO_mode, drvp[0].PIO_mode);
1617 		drvp[0].PIO_mode = mode[0];
1618 		drvp[1].PIO_mode = mode[1];
1619 	}
1620 ok:	/* The modes are setup */
1621 	for (drive = 0; drive < 2; drive++) {
1622 		if (drvp[drive].drive_flags & DRIVE_DMA) {
1623 			idetim |= piix_setup_idetim_timings(
1624 			    mode[drive], 1, chp->channel);
1625 			goto end;
1626 		}
1627 	}
1628 	/* If we get here, neither drive uses DMA */
1629 	if (mode[0] >= 2)
1630 		idetim |= piix_setup_idetim_timings(
1631 		    mode[0], 0, chp->channel);
1632 	else
1633 		idetim |= piix_setup_idetim_timings(
1634 		    mode[1], 0, chp->channel);
1635 end:	/*
1636 	 * timing mode is now set up in the controller. Enable
1637 	 * it per-drive
1638 	 */
1639 	for (drive = 0; drive < 2; drive++) {
1640 		/* If no drive, skip */
1641 		if ((drvp[drive].drive_flags & DRIVE) == 0)
1642 			continue;
1643 		idetim |= piix_setup_idetim_drvs(&drvp[drive]);
1644 		if (drvp[drive].drive_flags & DRIVE_DMA)
1645 			idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
1646 	}
1647 	if (idedma_ctl != 0) {
1648 		/* Add software bits in status register */
1649 		bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
1650 		    IDEDMA_CTL + (IDEDMA_SCH_OFFSET * chp->channel),
1651 		    idedma_ctl);
1652 	}
1653 	pci_conf_write(sc->sc_pc, sc->sc_tag, PIIX_IDETIM, idetim);
1654 	pciide_print_modes(cp);
1655 }
1656 
1657 void
1658 piix3_4_setup_channel(chp)
1659 	struct channel_softc *chp;
1660 {
1661 	struct ata_drive_datas *drvp;
1662 	u_int32_t oidetim, idetim, sidetim, udmareg, ideconf, idedma_ctl;
1663 	struct pciide_channel *cp = (struct pciide_channel*)chp;
1664 	struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
1665 	int drive;
1666 	int channel = chp->channel;
1667 
1668 	oidetim = pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_IDETIM);
1669 	sidetim = pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_SIDETIM);
1670 	udmareg = pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_UDMAREG);
1671 	ideconf = pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_CONFIG);
1672 	idetim = PIIX_IDETIM_CLEAR(oidetim, 0xffff, channel);
1673 	sidetim &= ~(PIIX_SIDETIM_ISP_MASK(channel) |
1674 	    PIIX_SIDETIM_RTC_MASK(channel));
1675 
1676 	idedma_ctl = 0;
1677 	/* If channel disabled, no need to go further */
1678 	if ((PIIX_IDETIM_READ(oidetim, channel) & PIIX_IDETIM_IDE) == 0)
1679 		return;
1680 	/* set up new idetim: Enable IDE registers decode */
1681 	idetim = PIIX_IDETIM_SET(idetim, PIIX_IDETIM_IDE, channel);
1682 
1683 	/* setup DMA if needed */
1684 	pciide_channel_dma_setup(cp);
1685 
1686 	for (drive = 0; drive < 2; drive++) {
1687 		udmareg &= ~(PIIX_UDMACTL_DRV_EN(channel, drive) |
1688 		    PIIX_UDMATIM_SET(0x3, channel, drive));
1689 		drvp = &chp->ch_drive[drive];
1690 		/* If no drive, skip */
1691 		if ((drvp->drive_flags & DRIVE) == 0)
1692 			continue;
1693 		if (((drvp->drive_flags & DRIVE_DMA) == 0 &&
1694 		    (drvp->drive_flags & DRIVE_UDMA) == 0))
1695 			goto pio;
1696 
1697 		if (sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801AA_IDE ||
1698 		    sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801AB_IDE ||
1699 		    sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801BA_IDE ||
1700 		    sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801BAM_IDE) {
1701 			ideconf |= PIIX_CONFIG_PINGPONG;
1702 		}
1703 		if (sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801BA_IDE ||
1704 		    sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801BAM_IDE) {
1705 			/* setup Ultra/100 */
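			/*
			 * PIIX_CONFIG_CR appears to be the cable-report bit;
			 * if it is not set (no 80-conductor cable detected),
			 * limit the drive to UDMA mode 2 (Ultra/33).
			 */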
1706 			if (drvp->UDMA_mode > 2 &&
1707 			    (ideconf & PIIX_CONFIG_CR(channel, drive)) == 0)
1708 				drvp->UDMA_mode = 2;
1709 			if (drvp->UDMA_mode > 4) {
1710 				ideconf |= PIIX_CONFIG_UDMA100(channel, drive);
1711 			} else {
1712 				ideconf &= ~PIIX_CONFIG_UDMA100(channel, drive);
1713 				if (drvp->UDMA_mode > 2) {
1714 					ideconf |= PIIX_CONFIG_UDMA66(channel,
1715 					    drive);
1716 				} else {
1717 					ideconf &= ~PIIX_CONFIG_UDMA66(channel,
1718 					    drive);
1719 				}
1720 			}
1721 		}
1722 		if (sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801AA_IDE) {
1723 			/* setup Ultra/66 */
1724 			if (drvp->UDMA_mode > 2 &&
1725 			    (ideconf & PIIX_CONFIG_CR(channel, drive)) == 0)
1726 				drvp->UDMA_mode = 2;
1727 			if (drvp->UDMA_mode > 2)
1728 				ideconf |= PIIX_CONFIG_UDMA66(channel, drive);
1729 			else
1730 				ideconf &= ~PIIX_CONFIG_UDMA66(channel, drive);
1731 		}
1732 		if ((chp->wdc->cap & WDC_CAPABILITY_UDMA) &&
1733 		    (drvp->drive_flags & DRIVE_UDMA)) {
1734 			/* use Ultra/DMA */
1735 			drvp->drive_flags &= ~DRIVE_DMA;
1736 			udmareg |= PIIX_UDMACTL_DRV_EN(channel, drive);
1737 			udmareg |= PIIX_UDMATIM_SET(
1738 			    piix4_sct_udma[drvp->UDMA_mode], channel, drive);
1739 		} else {
1740 			/* use Multiword DMA */
1741 			drvp->drive_flags &= ~DRIVE_UDMA;
1742 			if (drive == 0) {
1743 				idetim |= piix_setup_idetim_timings(
1744 				    drvp->DMA_mode, 1, channel);
1745 			} else {
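				/*
				 * Drive 1 timings go in SIDETIM; SITRE must
				 * be set so the chip uses the separate
				 * secondary (slave) drive timing register.
				 */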
1746 				sidetim |= piix_setup_sidetim_timings(
1747 					drvp->DMA_mode, 1, channel);
1748 				idetim = PIIX_IDETIM_SET(idetim,
1749 				    PIIX_IDETIM_SITRE, channel);
1750 			}
1751 		}
1752 		idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
1753 
1754 pio:		/* use PIO mode */
1755 		idetim |= piix_setup_idetim_drvs(drvp);
1756 		if (drive == 0) {
1757 			idetim |= piix_setup_idetim_timings(
1758 			    drvp->PIO_mode, 0, channel);
1759 		} else {
1760 			sidetim |= piix_setup_sidetim_timings(
1761 				drvp->PIO_mode, 0, channel);
1762 			idetim = PIIX_IDETIM_SET(idetim,
1763 			    PIIX_IDETIM_SITRE, channel);
1764 		}
1765 	}
1766 	if (idedma_ctl != 0) {
1767 		/* Add software bits in status register */
1768 		bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
1769 		    IDEDMA_CTL + (IDEDMA_SCH_OFFSET * channel),
1770 		    idedma_ctl);
1771 	}
1772 	pci_conf_write(sc->sc_pc, sc->sc_tag, PIIX_IDETIM, idetim);
1773 	pci_conf_write(sc->sc_pc, sc->sc_tag, PIIX_SIDETIM, sidetim);
1774 	pci_conf_write(sc->sc_pc, sc->sc_tag, PIIX_UDMAREG, udmareg);
1775 	pci_conf_write(sc->sc_pc, sc->sc_tag, PIIX_CONFIG, ideconf);
1776 	pciide_print_modes(cp);
1777 }
1778 
1779 
1780 /* setup ISP and RTC fields, based on mode */
1781 static u_int32_t
1782 piix_setup_idetim_timings(mode, dma, channel)
1783 	u_int8_t mode;
1784 	u_int8_t dma;
1785 	u_int8_t channel;
1786 {
1787 
1788 	if (dma)
1789 		return PIIX_IDETIM_SET(0,
1790 		    PIIX_IDETIM_ISP_SET(piix_isp_dma[mode]) |
1791 		    PIIX_IDETIM_RTC_SET(piix_rtc_dma[mode]),
1792 		    channel);
1793 	else
1794 		return PIIX_IDETIM_SET(0,
1795 		    PIIX_IDETIM_ISP_SET(piix_isp_pio[mode]) |
1796 		    PIIX_IDETIM_RTC_SET(piix_rtc_pio[mode]),
1797 		    channel);
1798 }
1799 
1800 /* setup DTE, PPE, IE and TIME field based on PIO mode */
1801 static u_int32_t
1802 piix_setup_idetim_drvs(drvp)
1803 	struct ata_drive_datas *drvp;
1804 {
1805 	u_int32_t ret = 0;
1806 	struct channel_softc *chp = drvp->chnl_softc;
1807 	u_int8_t channel = chp->channel;
1808 	u_int8_t drive = drvp->drive;
1809 
1810 	/*
1811 	 * If the drive is using UDMA, the timing setup is independent,
1812 	 * so just check DMA and PIO here.
1813 	 */
1814 	if (drvp->drive_flags & DRIVE_DMA) {
1815 		/* if mode = DMA mode 0, use compatible timings */
1816 		if ((drvp->drive_flags & DRIVE_DMA) &&
1817 		    drvp->DMA_mode == 0) {
1818 			drvp->PIO_mode = 0;
1819 			return ret;
1820 		}
1821 		ret = PIIX_IDETIM_SET(ret, PIIX_IDETIM_TIME(drive), channel);
1822 		/*
1823 		 * If PIO and DMA timings are the same, use fast timings for
1824 		 * PIO too; otherwise fall back to compat timings for PIO.
1825 		 */
1826 		if ((piix_isp_pio[drvp->PIO_mode] !=
1827 		    piix_isp_dma[drvp->DMA_mode]) ||
1828 		    (piix_rtc_pio[drvp->PIO_mode] !=
1829 		    piix_rtc_dma[drvp->DMA_mode]))
1830 			drvp->PIO_mode = 0;
1831 		/* if PIO mode <= 2, use compat timings for PIO */
1832 		if (drvp->PIO_mode <= 2) {
1833 			ret = PIIX_IDETIM_SET(ret, PIIX_IDETIM_DTE(drive),
1834 			    channel);
1835 			return ret;
1836 		}
1837 	}
1838 
1839 	/*
1840 	 * Now set up PIO modes. If mode < 2, use compat timings.
1841 	 * Else enable fast timings. Enable IORDY and prefetch/post
1842 	 * if PIO mode >= 3.
1843 	 */
1844 
1845 	if (drvp->PIO_mode < 2)
1846 		return ret;
1847 
1848 	ret = PIIX_IDETIM_SET(ret, PIIX_IDETIM_TIME(drive), channel);
1849 	if (drvp->PIO_mode >= 3) {
1850 		ret = PIIX_IDETIM_SET(ret, PIIX_IDETIM_IE(drive), channel);
1851 		ret = PIIX_IDETIM_SET(ret, PIIX_IDETIM_PPE(drive), channel);
1852 	}
1853 	return ret;
1854 }
1855 
1856 /* setup values in SIDETIM registers, based on mode */
1857 static u_int32_t
1858 piix_setup_sidetim_timings(mode, dma, channel)
1859 	u_int8_t mode;
1860 	u_int8_t dma;
1861 	u_int8_t channel;
1862 {
1863 	if (dma)
1864 		return PIIX_SIDETIM_ISP_SET(piix_isp_dma[mode], channel) |
1865 		    PIIX_SIDETIM_RTC_SET(piix_rtc_dma[mode], channel);
1866 	else
1867 		return PIIX_SIDETIM_ISP_SET(piix_isp_pio[mode], channel) |
1868 		    PIIX_SIDETIM_RTC_SET(piix_rtc_pio[mode], channel);
1869 }
1870 
1871 void
1872 amd7x6_chip_map(sc, pa)
1873 	struct pciide_softc *sc;
1874 	struct pci_attach_args *pa;
1875 {
1876 	struct pciide_channel *cp;
1877 	pcireg_t interface = PCI_INTERFACE(pa->pa_class);
1878 	int channel;
1879 	pcireg_t chanenable;
1880 	bus_size_t cmdsize, ctlsize;
1881 
1882 	if (pciide_chipen(sc, pa) == 0)
1883 		return;
1884 	printf("%s: bus-master DMA support present",
1885 	    sc->sc_wdcdev.sc_dev.dv_xname);
1886 	pciide_mapreg_dma(sc, pa);
1887 	printf("\n");
1888 	sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
1889 	    WDC_CAPABILITY_MODE;
1890 	if (sc->sc_dma_ok) {
1891 		sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_UDMA;
1892 		sc->sc_wdcdev.cap |= WDC_CAPABILITY_IRQACK;
1893 		sc->sc_wdcdev.irqack = pciide_irqack;
1894 	}
1895 	sc->sc_wdcdev.PIO_cap = 4;
1896 	sc->sc_wdcdev.DMA_cap = 2;
1897 
1898 	if (sc->sc_pp->ide_product == PCI_PRODUCT_AMD_PBC766_IDE)
1899 		sc->sc_wdcdev.UDMA_cap = 5;
1900 	else
1901 		sc->sc_wdcdev.UDMA_cap = 4;
1902 	sc->sc_wdcdev.set_modes = amd7x6_setup_channel;
1903 	sc->sc_wdcdev.channels = sc->wdc_chanarray;
1904 	sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
1905 	chanenable = pci_conf_read(sc->sc_pc, sc->sc_tag, AMD7X6_CHANSTATUS_EN);
1906 
1907 	WDCDEBUG_PRINT(("amd7x6_chip_map: Channel enable=0x%x\n", chanenable),
1908 	    DEBUG_PROBE);
1909 	for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
1910 		cp = &sc->pciide_channels[channel];
1911 		if (pciide_chansetup(sc, channel, interface) == 0)
1912 			continue;
1913 
1914 		if ((chanenable & AMD7X6_CHAN_EN(channel)) == 0) {
1915 			printf("%s: %s channel ignored (disabled)\n",
1916 			    sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
1917 			continue;
1918 		}
1919 		pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize,
1920 		    pciide_pci_intr);
1921 
1922 		if (pciide_chan_candisable(cp))
1923 			chanenable &= ~AMD7X6_CHAN_EN(channel);
1924 		pciide_map_compat_intr(pa, cp, channel, interface);
1925 		if (cp->hw_ok == 0)
1926 			continue;
1927 
1928 		amd7x6_setup_channel(&cp->wdc_channel);
1929 	}
1930 	pci_conf_write(sc->sc_pc, sc->sc_tag, AMD7X6_CHANSTATUS_EN,
1931 	    chanenable);
1932 	return;
1933 }
1934 
1935 void
1936 amd7x6_setup_channel(chp)
1937 	struct channel_softc *chp;
1938 {
1939 	u_int32_t udmatim_reg, datatim_reg;
1940 	u_int8_t idedma_ctl;
1941 	int mode, drive;
1942 	struct ata_drive_datas *drvp;
1943 	struct pciide_channel *cp = (struct pciide_channel*)chp;
1944 	struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
1945 #ifndef PCIIDE_AMD756_ENABLEDMA
1946 	int rev = PCI_REVISION(
1947 	    pci_conf_read(sc->sc_pc, sc->sc_tag, PCI_CLASS_REG));
1948 #endif
1949 
1950 	idedma_ctl = 0;
1951 	datatim_reg = pci_conf_read(sc->sc_pc, sc->sc_tag, AMD7X6_DATATIM);
1952 	udmatim_reg = pci_conf_read(sc->sc_pc, sc->sc_tag, AMD7X6_UDMA);
1953 	datatim_reg &= ~AMD7X6_DATATIM_MASK(chp->channel);
1954 	udmatim_reg &= ~AMD7X6_UDMA_MASK(chp->channel);
1955 
1956 	/* setup DMA if needed */
1957 	pciide_channel_dma_setup(cp);
1958 
1959 	for (drive = 0; drive < 2; drive++) {
1960 		drvp = &chp->ch_drive[drive];
1961 		/* If no drive, skip */
1962 		if ((drvp->drive_flags & DRIVE) == 0)
1963 			continue;
1964 		/* add timing values, setup DMA if needed */
1965 		if (((drvp->drive_flags & DRIVE_DMA) == 0 &&
1966 		    (drvp->drive_flags & DRIVE_UDMA) == 0)) {
1967 			mode = drvp->PIO_mode;
1968 			goto pio;
1969 		}
1970 		if ((chp->wdc->cap & WDC_CAPABILITY_UDMA) &&
1971 		    (drvp->drive_flags & DRIVE_UDMA)) {
1972 			/* use Ultra/DMA */
1973 			drvp->drive_flags &= ~DRIVE_DMA;
1974 			udmatim_reg |= AMD7X6_UDMA_EN(chp->channel, drive) |
1975 			    AMD7X6_UDMA_EN_MTH(chp->channel, drive) |
1976 			    AMD7X6_UDMA_TIME(chp->channel, drive,
1977 				amd7x6_udma_tim[drvp->UDMA_mode]);
1978 			/* can use PIO timings, MW DMA unused */
1979 			mode = drvp->PIO_mode;
1980 		} else {
1981 			/* use Multiword DMA, but only if revision is OK */
1982 			drvp->drive_flags &= ~DRIVE_UDMA;
1983 #ifndef PCIIDE_AMD756_ENABLEDMA
1984 			/*
1985 			 * The workaround doesn't seem to be necessary
1986 			 * with all drives, so it can be disabled by
1987 			 * defining PCIIDE_AMD756_ENABLEDMA. The bug causes a
1988 			 * hard hang if triggered.
1989 			 */
1990 			if (sc->sc_pp->ide_product ==
1991 			      PCI_PRODUCT_AMD_PBC756_IDE &&
1992 			    AMD756_CHIPREV_DISABLEDMA(rev)) {
1993 				printf("%s:%d:%d: multi-word DMA disabled due "
1994 				    "to chip revision\n",
1995 				    sc->sc_wdcdev.sc_dev.dv_xname,
1996 				    chp->channel, drive);
1997 				mode = drvp->PIO_mode;
1998 				drvp->drive_flags &= ~DRIVE_DMA;
1999 				goto pio;
2000 			}
2001 #endif
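			/*
			 * A single data timing value per drive covers both
			 * PIO and multi-word DMA below, so keep the two modes
			 * coupled: PIO mode n pairs with MW DMA mode n-2.
			 */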
2002 			/* mode = min(pio, dma+2) */
2003 			if (drvp->PIO_mode <= (drvp->DMA_mode + 2))
2004 				mode = drvp->PIO_mode;
2005 			else
2006 				mode = drvp->DMA_mode + 2;
2007 		}
2008 		idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
2009 
2010 pio:		/* setup PIO mode */
2011 		if (mode <= 2) {
2012 			drvp->DMA_mode = 0;
2013 			drvp->PIO_mode = 0;
2014 			mode = 0;
2015 		} else {
2016 			drvp->PIO_mode = mode;
2017 			drvp->DMA_mode = mode - 2;
2018 		}
2019 		datatim_reg |=
2020 		    AMD7X6_DATATIM_PULSE(chp->channel, drive,
2021 			amd7x6_pio_set[mode]) |
2022 		    AMD7X6_DATATIM_RECOV(chp->channel, drive,
2023 			amd7x6_pio_rec[mode]);
2024 	}
2025 	if (idedma_ctl != 0) {
2026 		/* Add software bits in status register */
2027 		bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
2028 		    IDEDMA_CTL + (IDEDMA_SCH_OFFSET * chp->channel),
2029 		    idedma_ctl);
2030 	}
2031 	pciide_print_modes(cp);
2032 	pci_conf_write(sc->sc_pc, sc->sc_tag, AMD7X6_DATATIM, datatim_reg);
2033 	pci_conf_write(sc->sc_pc, sc->sc_tag, AMD7X6_UDMA, udmatim_reg);
2034 }
2035 
2036 void
2037 apollo_chip_map(sc, pa)
2038 	struct pciide_softc *sc;
2039 	struct pci_attach_args *pa;
2040 {
2041 	struct pciide_channel *cp;
2042 	pcireg_t interface = PCI_INTERFACE(pa->pa_class);
2043 	int channel;
2044 	u_int32_t ideconf;
2045 	bus_size_t cmdsize, ctlsize;
2046 	pcitag_t pcib_tag;
2047 	pcireg_t pcib_id, pcib_class;
2048 
2049 	if (pciide_chipen(sc, pa) == 0)
2050 		return;
2051 	/* get a PCI tag for the ISA bridge (function 0 of the same device) */
2052 	pcib_tag = pci_make_tag(pa->pa_pc, pa->pa_bus, pa->pa_device, 0);
2053 	/* and read ID and rev of the ISA bridge */
2054 	pcib_id = pci_conf_read(sc->sc_pc, pcib_tag, PCI_ID_REG);
2055 	pcib_class = pci_conf_read(sc->sc_pc, pcib_tag, PCI_CLASS_REG);
2056 	printf(": VIA Technologies ");
2057 	switch (PCI_PRODUCT(pcib_id)) {
2058 	case PCI_PRODUCT_VIATECH_VT82C586_ISA:
2059 		printf("VT82C586 (Apollo VP) ");
2060 		if (PCI_REVISION(pcib_class) >= 0x02) {
2061 			printf("ATA33 controller\n");
2062 			sc->sc_wdcdev.UDMA_cap = 2;
2063 		} else {
2064 			printf("controller\n");
2065 			sc->sc_wdcdev.UDMA_cap = 0;
2066 		}
2067 		break;
2068 	case PCI_PRODUCT_VIATECH_VT82C596A:
2069 		printf("VT82C596A (Apollo Pro) ");
2070 		if (PCI_REVISION(pcib_class) >= 0x12) {
2071 			printf("ATA66 controller\n");
2072 			sc->sc_wdcdev.UDMA_cap = 4;
2073 		} else {
2074 			printf("ATA33 controller\n");
2075 			sc->sc_wdcdev.UDMA_cap = 2;
2076 		}
2077 		break;
2078 	case PCI_PRODUCT_VIATECH_VT82C686A_ISA:
2079 		printf("VT82C686A (Apollo KX133) ");
2080 		if (PCI_REVISION(pcib_class) >= 0x40) {
2081 			printf("ATA100 controller\n");
2082 			sc->sc_wdcdev.UDMA_cap = 5;
2083 		} else {
2084 			printf("ATA66 controller\n");
2085 			sc->sc_wdcdev.UDMA_cap = 4;
2086 		}
2087 		break;
2088 	default:
2089 		printf("unknown ATA controller\n");
2090 		sc->sc_wdcdev.UDMA_cap = 0;
2091 	}
2092 
2093 	printf("%s: bus-master DMA support present",
2094 	    sc->sc_wdcdev.sc_dev.dv_xname);
2095 	pciide_mapreg_dma(sc, pa);
2096 	printf("\n");
2097 	sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
2098 	    WDC_CAPABILITY_MODE;
2099 	if (sc->sc_dma_ok) {
2100 		sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_IRQACK;
2101 		sc->sc_wdcdev.irqack = pciide_irqack;
2102 		if (sc->sc_wdcdev.UDMA_cap > 0)
2103 			sc->sc_wdcdev.cap |= WDC_CAPABILITY_UDMA;
2104 	}
2105 	sc->sc_wdcdev.PIO_cap = 4;
2106 	sc->sc_wdcdev.DMA_cap = 2;
2107 	sc->sc_wdcdev.set_modes = apollo_setup_channel;
2108 	sc->sc_wdcdev.channels = sc->wdc_chanarray;
2109 	sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
2110 
2111 	WDCDEBUG_PRINT(("apollo_chip_map: old APO_IDECONF=0x%x, "
2112 	    "APO_CTLMISC=0x%x, APO_DATATIM=0x%x, APO_UDMA=0x%x\n",
2113 	    pci_conf_read(sc->sc_pc, sc->sc_tag, APO_IDECONF),
2114 	    pci_conf_read(sc->sc_pc, sc->sc_tag, APO_CTLMISC),
2115 	    pci_conf_read(sc->sc_pc, sc->sc_tag, APO_DATATIM),
2116 	    pci_conf_read(sc->sc_pc, sc->sc_tag, APO_UDMA)),
2117 	    DEBUG_PROBE);
2118 
2119 	for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
2120 		cp = &sc->pciide_channels[channel];
2121 		if (pciide_chansetup(sc, channel, interface) == 0)
2122 			continue;
2123 
2124 		ideconf = pci_conf_read(sc->sc_pc, sc->sc_tag, APO_IDECONF);
2125 		if ((ideconf & APO_IDECONF_EN(channel)) == 0) {
2126 			printf("%s: %s channel ignored (disabled)\n",
2127 			    sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
2128 			continue;
2129 		}
2130 		pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize,
2131 		    pciide_pci_intr);
2132 		if (cp->hw_ok == 0)
2133 			continue;
2134 		if (pciide_chan_candisable(cp)) {
2135 			ideconf &= ~APO_IDECONF_EN(channel);
2136 			pci_conf_write(sc->sc_pc, sc->sc_tag, APO_IDECONF,
2137 			    ideconf);
2138 		}
2139 		pciide_map_compat_intr(pa, cp, channel, interface);
2140 
2141 		if (cp->hw_ok == 0)
2142 			continue;
2143 		apollo_setup_channel(&sc->pciide_channels[channel].wdc_channel);
2144 	}
2145 	WDCDEBUG_PRINT(("apollo_chip_map: APO_DATATIM=0x%x, APO_UDMA=0x%x\n",
2146 	    pci_conf_read(sc->sc_pc, sc->sc_tag, APO_DATATIM),
2147 	    pci_conf_read(sc->sc_pc, sc->sc_tag, APO_UDMA)), DEBUG_PROBE);
2148 }
2149 
2150 void
2151 apollo_setup_channel(chp)
2152 	struct channel_softc *chp;
2153 {
2154 	u_int32_t udmatim_reg, datatim_reg;
2155 	u_int8_t idedma_ctl;
2156 	int mode, drive;
2157 	struct ata_drive_datas *drvp;
2158 	struct pciide_channel *cp = (struct pciide_channel*)chp;
2159 	struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
2160 
2161 	idedma_ctl = 0;
2162 	datatim_reg = pci_conf_read(sc->sc_pc, sc->sc_tag, APO_DATATIM);
2163 	udmatim_reg = pci_conf_read(sc->sc_pc, sc->sc_tag, APO_UDMA);
2164 	datatim_reg &= ~APO_DATATIM_MASK(chp->channel);
2165 	udmatim_reg &= ~APO_UDMA_MASK(chp->channel);
2166 
2167 	/* setup DMA if needed */
2168 	pciide_channel_dma_setup(cp);
2169 
2170 	for (drive = 0; drive < 2; drive++) {
2171 		drvp = &chp->ch_drive[drive];
2172 		/* If no drive, skip */
2173 		if ((drvp->drive_flags & DRIVE) == 0)
2174 			continue;
2175 		/* add timing values, setup DMA if needed */
2176 		if (((drvp->drive_flags & DRIVE_DMA) == 0 &&
2177 		    (drvp->drive_flags & DRIVE_UDMA) == 0)) {
2178 			mode = drvp->PIO_mode;
2179 			goto pio;
2180 		}
2181 		if ((chp->wdc->cap & WDC_CAPABILITY_UDMA) &&
2182 		    (drvp->drive_flags & DRIVE_UDMA)) {
2183 			/* use Ultra/DMA */
2184 			drvp->drive_flags &= ~DRIVE_DMA;
2185 			udmatim_reg |= APO_UDMA_EN(chp->channel, drive) |
2186 			    APO_UDMA_EN_MTH(chp->channel, drive);
2187 			if (sc->sc_wdcdev.UDMA_cap == 5) {
2188 				/* 686b */
2189 				udmatim_reg |= APO_UDMA_CLK66(chp->channel);
2190 				udmatim_reg |= APO_UDMA_TIME(chp->channel,
2191 				    drive, apollo_udma100_tim[drvp->UDMA_mode]);
2192 			} else if (sc->sc_wdcdev.UDMA_cap == 4) {
2193 				/* 596b or 686a */
2194 				udmatim_reg |= APO_UDMA_CLK66(chp->channel);
2195 				udmatim_reg |= APO_UDMA_TIME(chp->channel,
2196 				    drive, apollo_udma66_tim[drvp->UDMA_mode]);
2197 			} else {
2198 				/* 596a or 586b */
2199 				udmatim_reg |= APO_UDMA_TIME(chp->channel,
2200 				    drive, apollo_udma33_tim[drvp->UDMA_mode]);
2201 			}
2202 			/* can use PIO timings, MW DMA unused */
2203 			mode = drvp->PIO_mode;
2204 		} else {
2205 			/* use Multiword DMA */
2206 			drvp->drive_flags &= ~DRIVE_UDMA;
2207 			/* mode = min(pio, dma+2) */
2208 			if (drvp->PIO_mode <= (drvp->DMA_mode + 2))
2209 				mode = drvp->PIO_mode;
2210 			else
2211 				mode = drvp->DMA_mode + 2;
2212 		}
2213 		idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
2214 
2215 pio:		/* setup PIO mode */
2216 		if (mode <= 2) {
2217 			drvp->DMA_mode = 0;
2218 			drvp->PIO_mode = 0;
2219 			mode = 0;
2220 		} else {
2221 			drvp->PIO_mode = mode;
2222 			drvp->DMA_mode = mode - 2;
2223 		}
2224 		datatim_reg |=
2225 		    APO_DATATIM_PULSE(chp->channel, drive,
2226 			apollo_pio_set[mode]) |
2227 		    APO_DATATIM_RECOV(chp->channel, drive,
2228 			apollo_pio_rec[mode]);
2229 	}
2230 	if (idedma_ctl != 0) {
2231 		/* Add software bits in status register */
2232 		bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
2233 		    IDEDMA_CTL + (IDEDMA_SCH_OFFSET * chp->channel),
2234 		    idedma_ctl);
2235 	}
2236 	pciide_print_modes(cp);
2237 	pci_conf_write(sc->sc_pc, sc->sc_tag, APO_DATATIM, datatim_reg);
2238 	pci_conf_write(sc->sc_pc, sc->sc_tag, APO_UDMA, udmatim_reg);
2239 }
2240 
2241 void
2242 cmd_channel_map(pa, sc, channel)
2243 	struct pci_attach_args *pa;
2244 	struct pciide_softc *sc;
2245 	int channel;
2246 {
2247 	struct pciide_channel *cp = &sc->pciide_channels[channel];
2248 	bus_size_t cmdsize, ctlsize;
2249 	u_int8_t ctrl = pciide_pci_read(sc->sc_pc, sc->sc_tag, CMD_CTRL);
2250 	int interface;
2251 
2252 	/*
2253 	 * The 0648/0649 can be told to identify as a RAID controller.
2254 	 * In this case, we have to fake the interface.
2255 	 */
2256 	if (PCI_SUBCLASS(pa->pa_class) != PCI_SUBCLASS_MASS_STORAGE_IDE) {
2257 		interface = PCIIDE_INTERFACE_SETTABLE(0) |
2258 		    PCIIDE_INTERFACE_SETTABLE(1);
2259 		if (pciide_pci_read(pa->pa_pc, pa->pa_tag, CMD_CONF) &
2260 		    CMD_CONF_DSA1)
2261 			interface |= PCIIDE_INTERFACE_PCI(0) |
2262 			    PCIIDE_INTERFACE_PCI(1);
2263 	} else {
2264 		interface = PCI_INTERFACE(pa->pa_class);
2265 	}
2266 
2267 	sc->wdc_chanarray[channel] = &cp->wdc_channel;
2268 	cp->name = PCIIDE_CHANNEL_NAME(channel);
2269 	cp->wdc_channel.channel = channel;
2270 	cp->wdc_channel.wdc = &sc->sc_wdcdev;
2271 
2272 	if (channel > 0) {
2273 		cp->wdc_channel.ch_queue =
2274 		    sc->pciide_channels[0].wdc_channel.ch_queue;
2275 	} else {
2276 		cp->wdc_channel.ch_queue =
2277 		    malloc(sizeof(struct channel_queue), M_DEVBUF, M_NOWAIT);
2278 	}
2279 	if (cp->wdc_channel.ch_queue == NULL) {
2280 		printf("%s %s channel: "
2281 		    "can't allocate memory for command queue",
2282 		    sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
2283 		return;
2284 	}
2285 
2286 	printf("%s: %s channel %s to %s mode\n",
2287 	    sc->sc_wdcdev.sc_dev.dv_xname, cp->name,
2288 	    (interface & PCIIDE_INTERFACE_SETTABLE(channel)) ?
2289 	    "configured" : "wired",
2290 	    (interface & PCIIDE_INTERFACE_PCI(channel)) ?
2291 	    "native-PCI" : "compatibility");
2292 
2293 	/*
2294 	 * With a CMD PCI64x, if we get here, the first channel is enabled:
2295 	 * there's no way to disable the first channel without disabling
2296 	 * the whole device
2297 	 */
2298 	if (channel != 0 && (ctrl & CMD_CTRL_2PORT) == 0) {
2299 		printf("%s: %s channel ignored (disabled)\n",
2300 		    sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
2301 		return;
2302 	}
2303 
2304 	pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize, cmd_pci_intr);
2305 	if (cp->hw_ok == 0)
2306 		return;
2307 	if (channel == 1) {
2308 		if (pciide_chan_candisable(cp)) {
2309 			ctrl &= ~CMD_CTRL_2PORT;
2310 			pciide_pci_write(pa->pa_pc, pa->pa_tag,
2311 			    CMD_CTRL, ctrl);
2312 		}
2313 	}
2314 	pciide_map_compat_intr(pa, cp, channel, interface);
2315 }
2316 
2317 int
2318 cmd_pci_intr(arg)
2319 	void *arg;
2320 {
2321 	struct pciide_softc *sc = arg;
2322 	struct pciide_channel *cp;
2323 	struct channel_softc *wdc_cp;
2324 	int i, rv, crv;
2325 	u_int32_t priirq, secirq;
2326 
2327 	rv = 0;
2328 	priirq = pciide_pci_read(sc->sc_pc, sc->sc_tag, CMD_CONF);
2329 	secirq = pciide_pci_read(sc->sc_pc, sc->sc_tag, CMD_ARTTIM23);
2330 	for (i = 0; i < sc->sc_wdcdev.nchannels; i++) {
2331 		cp = &sc->pciide_channels[i];
2332 		wdc_cp = &cp->wdc_channel;
2333 		/* If a compat channel, skip. */
2334 		if (cp->compat)
2335 			continue;
2336 		if ((i == 0 && (priirq & CMD_CONF_DRV0_INTR)) ||
2337 		    (i == 1 && (secirq & CMD_ARTTIM23_IRQ))) {
2338 			crv = wdcintr(wdc_cp);
2339 			if (crv == 0)
2340 				printf("%s:%d: bogus intr\n",
2341 				    sc->sc_wdcdev.sc_dev.dv_xname, i);
2342 			else
2343 				rv = 1;
2344 		}
2345 	}
2346 	return rv;
2347 }
2348 
2349 void
2350 cmd_chip_map(sc, pa)
2351 	struct pciide_softc *sc;
2352 	struct pci_attach_args *pa;
2353 {
2354 	int channel;
2355 
2356 	/*
2357 	 * For a CMD PCI064x, the use of PCI_COMMAND_IO_ENABLE
2358 	 * and the base address registers can be disabled at the
2359 	 * hardware level. In this case, the device is wired
2360 	 * in compat mode and its first channel is always enabled,
2361 	 * but we can't rely on PCI_COMMAND_IO_ENABLE.
2362 	 * In fact, it seems that the first channel of the CMD PCI0640
2363 	 * can't be disabled.
2364 	 */
2365 
2366 #ifdef PCIIDE_CMD064x_DISABLE
2367 	if (pciide_chipen(sc, pa) == 0)
2368 		return;
2369 #endif
2370 
2371 	printf("%s: hardware does not support DMA\n",
2372 	    sc->sc_wdcdev.sc_dev.dv_xname);
2373 	sc->sc_dma_ok = 0;
2374 
2375 	sc->sc_wdcdev.channels = sc->wdc_chanarray;
2376 	sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
2377 	sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16;
2378 
2379 	for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
2380 		cmd_channel_map(pa, sc, channel);
2381 	}
2382 }
2383 
2384 void
2385 cmd0643_9_chip_map(sc, pa)
2386 	struct pciide_softc *sc;
2387 	struct pci_attach_args *pa;
2388 {
2389 	struct pciide_channel *cp;
2390 	int channel;
2391 	int rev = PCI_REVISION(
2392 	    pci_conf_read(sc->sc_pc, sc->sc_tag, PCI_CLASS_REG));
2393 
2394 	/*
2395 	 * For a CMD PCI064x, the use of PCI_COMMAND_IO_ENABLE
2396 	 * and the base address registers can be disabled at the
2397 	 * hardware level. In this case, the device is wired
2398 	 * in compat mode and its first channel is always enabled,
2399 	 * but we can't rely on PCI_COMMAND_IO_ENABLE.
2400 	 * In fact, it seems that the first channel of the CMD PCI0640
2401 	 * can't be disabled.
2402 	 */
2403 
2404 #ifdef PCIIDE_CMD064x_DISABLE
2405 	if (pciide_chipen(sc, pa) == 0)
2406 		return;
2407 #endif
2408 	printf("%s: bus-master DMA support present",
2409 	    sc->sc_wdcdev.sc_dev.dv_xname);
2410 	pciide_mapreg_dma(sc, pa);
2411 	printf("\n");
2412 	sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
2413 	    WDC_CAPABILITY_MODE;
2414 	if (sc->sc_dma_ok) {
2415 		sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_IRQACK;
2416 		switch (sc->sc_pp->ide_product) {
2417 		case PCI_PRODUCT_CMDTECH_649:
2418 		case PCI_PRODUCT_CMDTECH_648:
2419 			sc->sc_wdcdev.cap |= WDC_CAPABILITY_UDMA;
2420 			sc->sc_wdcdev.UDMA_cap = 4;
2421 			sc->sc_wdcdev.irqack = cmd646_9_irqack;
2422 			break;
2423 		case PCI_PRODUCT_CMDTECH_646:
2424 			if (rev >= CMD0646U2_REV) {
2425 				sc->sc_wdcdev.cap |= WDC_CAPABILITY_UDMA;
2426 				sc->sc_wdcdev.UDMA_cap = 2;
2427 			} else if (rev >= CMD0646U_REV) {
2428 			/*
2429 			 * Linux's driver claims that the 646U is broken
2430 			 * with UDMA. Only enable it if we know what we're
2431 			 * doing
2432 			 */
2433 #ifdef PCIIDE_CMD0646U_ENABLEUDMA
2434 				sc->sc_wdcdev.cap |= WDC_CAPABILITY_UDMA;
2435 				sc->sc_wdcdev.UDMA_cap = 2;
2436 #endif
2437 				/* explicitly disable UDMA */
2438 				pciide_pci_write(sc->sc_pc, sc->sc_tag,
2439 				    CMD_UDMATIM(0), 0);
2440 				pciide_pci_write(sc->sc_pc, sc->sc_tag,
2441 				    CMD_UDMATIM(1), 0);
2442 			}
2443 			sc->sc_wdcdev.irqack = cmd646_9_irqack;
2444 			break;
2445 		default:
2446 			sc->sc_wdcdev.irqack = pciide_irqack;
2447 		}
2448 	}
2449 
2450 	sc->sc_wdcdev.channels = sc->wdc_chanarray;
2451 	sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
2452 	sc->sc_wdcdev.PIO_cap = 4;
2453 	sc->sc_wdcdev.DMA_cap = 2;
2454 	sc->sc_wdcdev.set_modes = cmd0643_9_setup_channel;
2455 
2456 	WDCDEBUG_PRINT(("cmd0643_9_chip_map: old timings reg 0x%x 0x%x\n",
2457 		pci_conf_read(sc->sc_pc, sc->sc_tag, 0x54),
2458 		pci_conf_read(sc->sc_pc, sc->sc_tag, 0x58)),
2459 		DEBUG_PROBE);
2460 
2461 	for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
2462 		cp = &sc->pciide_channels[channel];
2463 		cmd_channel_map(pa, sc, channel);
2464 		if (cp->hw_ok == 0)
2465 			continue;
2466 		cmd0643_9_setup_channel(&cp->wdc_channel);
2467 	}
2468 	/*
2469 	 * Note: this also makes sure we clear the IRQ disable and reset
2470 	 * bits.
2471 	 */
2472 	pciide_pci_write(sc->sc_pc, sc->sc_tag, CMD_DMA_MODE, CMD_DMA_MULTIPLE);
2473 	WDCDEBUG_PRINT(("cmd0643_9_chip_map: timings reg now 0x%x 0x%x\n",
2474 	    pci_conf_read(sc->sc_pc, sc->sc_tag, 0x54),
2475 	    pci_conf_read(sc->sc_pc, sc->sc_tag, 0x58)),
2476 	    DEBUG_PROBE);
2477 }
2478 
2479 void
2480 cmd0643_9_setup_channel(chp)
2481 	struct channel_softc *chp;
2482 {
2483 	struct ata_drive_datas *drvp;
2484 	u_int8_t tim;
2485 	u_int32_t idedma_ctl, udma_reg;
2486 	int drive;
2487 	struct pciide_channel *cp = (struct pciide_channel*)chp;
2488 	struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
2489 
2490 	idedma_ctl = 0;
2491 	/* setup DMA if needed */
2492 	pciide_channel_dma_setup(cp);
2493 
2494 	for (drive = 0; drive < 2; drive++) {
2495 		drvp = &chp->ch_drive[drive];
2496 		/* If no drive, skip */
2497 		if ((drvp->drive_flags & DRIVE) == 0)
2498 			continue;
2499 		/* add timing values, setup DMA if needed */
2500 		tim = cmd0643_9_data_tim_pio[drvp->PIO_mode];
2501 		if (drvp->drive_flags & (DRIVE_DMA | DRIVE_UDMA)) {
2502 			if (drvp->drive_flags & DRIVE_UDMA) {
2503 				/* UltraDMA on a 646U2, 0648 or 0649 */
2504 				drvp->drive_flags &= ~DRIVE_DMA;
2505 				udma_reg = pciide_pci_read(sc->sc_pc,
2506 				    sc->sc_tag, CMD_UDMATIM(chp->channel));
2507 				if (drvp->UDMA_mode > 2 &&
2508 				    (pciide_pci_read(sc->sc_pc, sc->sc_tag,
2509 				    CMD_BICSR) &
2510 				    CMD_BICSR_80(chp->channel)) == 0)
2511 					drvp->UDMA_mode = 2;
2512 				if (drvp->UDMA_mode > 2)
2513 					udma_reg &= ~CMD_UDMATIM_UDMA33(drive);
2514 				else if (sc->sc_wdcdev.UDMA_cap > 2)
2515 					udma_reg |= CMD_UDMATIM_UDMA33(drive);
2516 				udma_reg |= CMD_UDMATIM_UDMA(drive);
2517 				udma_reg &= ~(CMD_UDMATIM_TIM_MASK <<
2518 				    CMD_UDMATIM_TIM_OFF(drive));
2519 				udma_reg |=
2520 				    (cmd0646_9_tim_udma[drvp->UDMA_mode] <<
2521 				    CMD_UDMATIM_TIM_OFF(drive));
2522 				pciide_pci_write(sc->sc_pc, sc->sc_tag,
2523 				    CMD_UDMATIM(chp->channel), udma_reg);
2524 			} else {
2525 				/*
2526 				 * use Multiword DMA.
2527 				 * Timings will be used for both PIO and DMA,
2528 				 * so adjust the DMA mode if needed.
2529 				 * If we have a 0646U2/8/9, turn off UDMA.
2530 				 */
2531 				if (sc->sc_wdcdev.cap & WDC_CAPABILITY_UDMA) {
2532 					udma_reg = pciide_pci_read(sc->sc_pc,
2533 					    sc->sc_tag,
2534 					    CMD_UDMATIM(chp->channel));
2535 					udma_reg &= ~CMD_UDMATIM_UDMA(drive);
2536 					pciide_pci_write(sc->sc_pc, sc->sc_tag,
2537 					    CMD_UDMATIM(chp->channel),
2538 					    udma_reg);
2539 				}
2540 				if (drvp->PIO_mode >= 3 &&
2541 				    (drvp->DMA_mode + 2) > drvp->PIO_mode) {
2542 					drvp->DMA_mode = drvp->PIO_mode - 2;
2543 				}
2544 				tim = cmd0643_9_data_tim_dma[drvp->DMA_mode];
2545 			}
2546 			idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
2547 		}
2548 		pciide_pci_write(sc->sc_pc, sc->sc_tag,
2549 		    CMD_DATA_TIM(chp->channel, drive), tim);
2550 	}
2551 	if (idedma_ctl != 0) {
2552 		/* Add software bits in status register */
2553 		bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
2554 		    IDEDMA_CTL + (IDEDMA_SCH_OFFSET * chp->channel),
2555 		    idedma_ctl);
2556 	}
2557 	pciide_print_modes(cp);
2558 }
2559 
2560 void
2561 cmd646_9_irqack(chp)
2562 	struct channel_softc *chp;
2563 {
2564 	u_int32_t priirq, secirq;
2565 	struct pciide_channel *cp = (struct pciide_channel*)chp;
2566 	struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
2567 
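	/*
	 * The interrupt status bits in CMD_CONF / CMD_ARTTIM23 appear to be
	 * cleared by writing the register value back unchanged, which is
	 * what acknowledges the interrupt here.
	 */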
2568 	if (chp->channel == 0) {
2569 		priirq = pciide_pci_read(sc->sc_pc, sc->sc_tag, CMD_CONF);
2570 		pciide_pci_write(sc->sc_pc, sc->sc_tag, CMD_CONF, priirq);
2571 	} else {
2572 		secirq = pciide_pci_read(sc->sc_pc, sc->sc_tag, CMD_ARTTIM23);
2573 		pciide_pci_write(sc->sc_pc, sc->sc_tag, CMD_ARTTIM23, secirq);
2574 	}
2575 	pciide_irqack(chp);
2576 }
2577 
2578 void
2579 cy693_chip_map(sc, pa)
2580 	struct pciide_softc *sc;
2581 	struct pci_attach_args *pa;
2582 {
2583 	struct pciide_channel *cp;
2584 	pcireg_t interface = PCI_INTERFACE(pa->pa_class);
2585 	bus_size_t cmdsize, ctlsize;
2586 
2587 	if (pciide_chipen(sc, pa) == 0)
2588 		return;
2589 	/*
2590 	 * This chip has 2 PCI IDE functions, one for primary and one for
2591 	 * secondary, so we need to call pciide_mapregs_compat() with
2592 	 * the real channel.
2593 	 */
2594 	if (pa->pa_function == 1) {
2595 		sc->sc_cy_compatchan = 0;
2596 	} else if (pa->pa_function == 2) {
2597 		sc->sc_cy_compatchan = 1;
2598 	} else {
2599 		printf("%s: unexpected PCI function %d\n",
2600 		    sc->sc_wdcdev.sc_dev.dv_xname, pa->pa_function);
2601 		return;
2602 	}
2603 	if (interface & PCIIDE_INTERFACE_BUS_MASTER_DMA) {
2604 		printf("%s: bus-master DMA support present",
2605 		    sc->sc_wdcdev.sc_dev.dv_xname);
2606 		pciide_mapreg_dma(sc, pa);
2607 	} else {
2608 		printf("%s: hardware does not support DMA",
2609 		    sc->sc_wdcdev.sc_dev.dv_xname);
2610 		sc->sc_dma_ok = 0;
2611 	}
2612 	printf("\n");
2613 
2614 	sc->sc_cy_handle = cy82c693_init(pa->pa_iot);
2615 	if (sc->sc_cy_handle == NULL) {
2616 		printf("%s: unable to map hyperCache control registers\n",
2617 		    sc->sc_wdcdev.sc_dev.dv_xname);
2618 		sc->sc_dma_ok = 0;
2619 	}
2620 
2621 	sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
2622 	    WDC_CAPABILITY_MODE;
2623 	if (sc->sc_dma_ok) {
2624 		sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_IRQACK;
2625 		sc->sc_wdcdev.irqack = pciide_irqack;
2626 	}
2627 	sc->sc_wdcdev.PIO_cap = 4;
2628 	sc->sc_wdcdev.DMA_cap = 2;
2629 	sc->sc_wdcdev.set_modes = cy693_setup_channel;
2630 
2631 	sc->sc_wdcdev.channels = sc->wdc_chanarray;
2632 	sc->sc_wdcdev.nchannels = 1;
2633 
2634 	/* Only one channel for this chip; if we get here it's enabled */
2635 	cp = &sc->pciide_channels[0];
2636 	sc->wdc_chanarray[0] = &cp->wdc_channel;
2637 	cp->name = PCIIDE_CHANNEL_NAME(0);
2638 	cp->wdc_channel.channel = 0;
2639 	cp->wdc_channel.wdc = &sc->sc_wdcdev;
2640 	cp->wdc_channel.ch_queue =
2641 	    malloc(sizeof(struct channel_queue), M_DEVBUF, M_NOWAIT);
2642 	if (cp->wdc_channel.ch_queue == NULL) {
2643 		printf("%s primary channel: "
2644 		    "can't allocate memory for command queue",
2645 		    sc->sc_wdcdev.sc_dev.dv_xname);
2646 		return;
2647 	}
2648 	printf("%s: primary channel %s to ",
2649 	    sc->sc_wdcdev.sc_dev.dv_xname,
2650 	    (interface & PCIIDE_INTERFACE_SETTABLE(0)) ?
2651 	    "configured" : "wired");
2652 	if (interface & PCIIDE_INTERFACE_PCI(0)) {
2653 		printf("native-PCI");
2654 		cp->hw_ok = pciide_mapregs_native(pa, cp, &cmdsize, &ctlsize,
2655 		    pciide_pci_intr);
2656 	} else {
2657 		printf("compatibility");
2658 		cp->hw_ok = pciide_mapregs_compat(pa, cp, sc->sc_cy_compatchan,
2659 		    &cmdsize, &ctlsize);
2660 	}
2661 	printf(" mode\n");
2662 	cp->wdc_channel.data32iot = cp->wdc_channel.cmd_iot;
2663 	cp->wdc_channel.data32ioh = cp->wdc_channel.cmd_ioh;
2664 	wdcattach(&cp->wdc_channel);
2665 	if (pciide_chan_candisable(cp)) {
2666 		pci_conf_write(sc->sc_pc, sc->sc_tag,
2667 		    PCI_COMMAND_STATUS_REG, 0);
2668 	}
2669 	pciide_map_compat_intr(pa, cp, sc->sc_cy_compatchan, interface);
2670 	if (cp->hw_ok == 0)
2671 		return;
2672 	WDCDEBUG_PRINT(("cy693_chip_map: old timings reg 0x%x\n",
2673 	    pci_conf_read(sc->sc_pc, sc->sc_tag, CY_CMD_CTRL)),DEBUG_PROBE);
2674 	cy693_setup_channel(&cp->wdc_channel);
2675 	WDCDEBUG_PRINT(("cy693_chip_map: new timings reg 0x%x\n",
2676 	    pci_conf_read(sc->sc_pc, sc->sc_tag, CY_CMD_CTRL)), DEBUG_PROBE);
2677 }
2678 
2679 void
2680 cy693_setup_channel(chp)
2681 	struct channel_softc *chp;
2682 {
2683 	struct ata_drive_datas *drvp;
2684 	int drive;
2685 	u_int32_t cy_cmd_ctrl;
2686 	u_int32_t idedma_ctl;
2687 	struct pciide_channel *cp = (struct pciide_channel*)chp;
2688 	struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
2689 	int dma_mode = -1;
2690 
2691 	cy_cmd_ctrl = idedma_ctl = 0;
2692 
2693 	/* setup DMA if needed */
2694 	pciide_channel_dma_setup(cp);
2695 
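	/*
	 * Only one DMA timing value is programmed per channel below, so the
	 * lowest DMA mode required by the drives on this channel is used
	 * for both of them.
	 */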
2696 	for (drive = 0; drive < 2; drive++) {
2697 		drvp = &chp->ch_drive[drive];
2698 		/* If no drive, skip */
2699 		if ((drvp->drive_flags & DRIVE) == 0)
2700 			continue;
2701 		/* add timing values, setup DMA if needed */
2702 		if (drvp->drive_flags & DRIVE_DMA) {
2703 			idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
2704 			/* use Multiword DMA */
2705 			if (dma_mode == -1 || dma_mode > drvp->DMA_mode)
2706 				dma_mode = drvp->DMA_mode;
2707 		}
2708 		cy_cmd_ctrl |= (cy_pio_pulse[drvp->PIO_mode] <<
2709 		    CY_CMD_CTRL_IOW_PULSE_OFF(drive));
2710 		cy_cmd_ctrl |= (cy_pio_rec[drvp->PIO_mode] <<
2711 		    CY_CMD_CTRL_IOW_REC_OFF(drive));
2712 		cy_cmd_ctrl |= (cy_pio_pulse[drvp->PIO_mode] <<
2713 		    CY_CMD_CTRL_IOR_PULSE_OFF(drive));
2714 		cy_cmd_ctrl |= (cy_pio_rec[drvp->PIO_mode] <<
2715 		    CY_CMD_CTRL_IOR_REC_OFF(drive));
2716 	}
2717 	pci_conf_write(sc->sc_pc, sc->sc_tag, CY_CMD_CTRL, cy_cmd_ctrl);
2718 	chp->ch_drive[0].DMA_mode = dma_mode;
2719 	chp->ch_drive[1].DMA_mode = dma_mode;
2720 
2721 	if (dma_mode == -1)
2722 		dma_mode = 0;
2723 
2724 	if (sc->sc_cy_handle != NULL) {
2725 		/* Note: `multiple' is implied. */
2726 		cy82c693_write(sc->sc_cy_handle,
2727 		    (sc->sc_cy_compatchan == 0) ?
2728 		    CY_DMA_IDX_PRIMARY : CY_DMA_IDX_SECONDARY, dma_mode);
2729 	}
2730 
2731 	pciide_print_modes(cp);
2732 
2733 	if (idedma_ctl != 0) {
2734 		/* Add software bits in status register */
2735 		bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
2736 		    IDEDMA_CTL, idedma_ctl);
2737 	}
2738 }
2739 
2740 void
2741 sis_chip_map(sc, pa)
2742 	struct pciide_softc *sc;
2743 	struct pci_attach_args *pa;
2744 {
2745 	struct pciide_channel *cp;
2746 	int channel;
2747 	u_int8_t sis_ctr0 = pciide_pci_read(sc->sc_pc, sc->sc_tag, SIS_CTRL0);
2748 	pcireg_t interface = PCI_INTERFACE(pa->pa_class);
2749 	pcireg_t rev = PCI_REVISION(pa->pa_class);
2750 	bus_size_t cmdsize, ctlsize;
2751 	pcitag_t pchb_tag;
2752 	pcireg_t pchb_id, pchb_class;
2753 
2754 	if (pciide_chipen(sc, pa) == 0)
2755 		return;
2756 	printf("%s: bus-master DMA support present",
2757 	    sc->sc_wdcdev.sc_dev.dv_xname);
2758 	pciide_mapreg_dma(sc, pa);
2759 	printf("\n");
2760 
2761 	/* get a PCI tag for the host bridge (function 0 of the same device) */
2762 	pchb_tag = pci_make_tag(pa->pa_pc, pa->pa_bus, pa->pa_device, 0);
2763 	/* and read the ID and revision of the host bridge */
2764 	pchb_id = pci_conf_read(sc->sc_pc, pchb_tag, PCI_ID_REG);
2765 	pchb_class = pci_conf_read(sc->sc_pc, pchb_tag, PCI_CLASS_REG);
2766 
2767 	sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
2768 	    WDC_CAPABILITY_MODE;
2769 	if (sc->sc_dma_ok) {
2770 		sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_IRQACK;
2771 		sc->sc_wdcdev.irqack = pciide_irqack;
2772 		/*
2773 		 * Controllers associated with a rev 0x2 530 Host to PCI Bridge
2774 		 * have problems with UDMA (info provided by Christos)
2775 		 */
2776 		if (rev >= 0xd0 &&
2777 		    (PCI_PRODUCT(pchb_id) != PCI_PRODUCT_SIS_530HB ||
2778 		    PCI_REVISION(pchb_class) >= 0x03))
2779 			sc->sc_wdcdev.cap |= WDC_CAPABILITY_UDMA;
2780 	}
2781 
2782 	sc->sc_wdcdev.PIO_cap = 4;
2783 	sc->sc_wdcdev.DMA_cap = 2;
2784 	if (sc->sc_wdcdev.cap & WDC_CAPABILITY_UDMA)
2785 		sc->sc_wdcdev.UDMA_cap = 2;
2786 	sc->sc_wdcdev.set_modes = sis_setup_channel;
2787 
2788 	sc->sc_wdcdev.channels = sc->wdc_chanarray;
2789 	sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
2790 
2791 	pciide_pci_write(sc->sc_pc, sc->sc_tag, SIS_MISC,
2792 	    pciide_pci_read(sc->sc_pc, sc->sc_tag, SIS_MISC) |
2793 	    SIS_MISC_TIM_SEL | SIS_MISC_FIFO_SIZE);
2794 
2795 	for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
2796 		cp = &sc->pciide_channels[channel];
2797 		if (pciide_chansetup(sc, channel, interface) == 0)
2798 			continue;
2799 		if ((channel == 0 && (sis_ctr0 & SIS_CTRL0_CHAN0_EN) == 0) ||
2800 		    (channel == 1 && (sis_ctr0 & SIS_CTRL0_CHAN1_EN) == 0)) {
2801 			printf("%s: %s channel ignored (disabled)\n",
2802 			    sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
2803 			continue;
2804 		}
2805 		pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize,
2806 		    pciide_pci_intr);
2807 		if (cp->hw_ok == 0)
2808 			continue;
2809 		if (pciide_chan_candisable(cp)) {
2810 			if (channel == 0)
2811 				sis_ctr0 &= ~SIS_CTRL0_CHAN0_EN;
2812 			else
2813 				sis_ctr0 &= ~SIS_CTRL0_CHAN1_EN;
2814 			pciide_pci_write(sc->sc_pc, sc->sc_tag, SIS_CTRL0,
2815 			    sis_ctr0);
2816 		}
2817 		pciide_map_compat_intr(pa, cp, channel, interface);
2818 		if (cp->hw_ok == 0)
2819 			continue;
2820 		sis_setup_channel(&cp->wdc_channel);
2821 	}
2822 }
2823 
2824 void
2825 sis_setup_channel(chp)
2826 	struct channel_softc *chp;
2827 {
2828 	struct ata_drive_datas *drvp;
2829 	int drive;
2830 	u_int32_t sis_tim;
2831 	u_int32_t idedma_ctl;
2832 	struct pciide_channel *cp = (struct pciide_channel*)chp;
2833 	struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
2834 
2835 	WDCDEBUG_PRINT(("sis_setup_channel: old timings reg for "
2836 	    "channel %d 0x%x\n", chp->channel,
2837 	    pci_conf_read(sc->sc_pc, sc->sc_tag, SIS_TIM(chp->channel))),
2838 	    DEBUG_PROBE);
2839 	sis_tim = 0;
2840 	idedma_ctl = 0;
2841 	/* setup DMA if needed */
2842 	pciide_channel_dma_setup(cp);
2843 
2844 	for (drive = 0; drive < 2; drive++) {
2845 		drvp = &chp->ch_drive[drive];
2846 		/* If no drive, skip */
2847 		if ((drvp->drive_flags & DRIVE) == 0)
2848 			continue;
2849 		/* add timing values, setup DMA if needed */
2850 		if ((drvp->drive_flags & DRIVE_DMA) == 0 &&
2851 		    (drvp->drive_flags & DRIVE_UDMA) == 0)
2852 			goto pio;
2853 
2854 		if (drvp->drive_flags & DRIVE_UDMA) {
2855 			/* use Ultra/DMA */
2856 			drvp->drive_flags &= ~DRIVE_DMA;
2857 			sis_tim |= sis_udma_tim[drvp->UDMA_mode] <<
2858 			    SIS_TIM_UDMA_TIME_OFF(drive);
2859 			sis_tim |= SIS_TIM_UDMA_EN(drive);
2860 		} else {
2861 			/*
2862 			 * use Multiword DMA
2863 			 * Timings will be used for both PIO and DMA,
2864 			 * so adjust DMA mode if needed
2865 			 */
2866 			if (drvp->PIO_mode > (drvp->DMA_mode + 2))
2867 				drvp->PIO_mode = drvp->DMA_mode + 2;
2868 			if (drvp->DMA_mode + 2 > (drvp->PIO_mode))
2869 				drvp->DMA_mode = (drvp->PIO_mode > 2) ?
2870 				    drvp->PIO_mode - 2 : 0;
2871 			if (drvp->DMA_mode == 0)
2872 				drvp->PIO_mode = 0;
2873 		}
2874 		idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
2875 pio:		sis_tim |= sis_pio_act[drvp->PIO_mode] <<
2876 		    SIS_TIM_ACT_OFF(drive);
2877 		sis_tim |= sis_pio_rec[drvp->PIO_mode] <<
2878 		    SIS_TIM_REC_OFF(drive);
2879 	}
2880 	WDCDEBUG_PRINT(("sis_setup_channel: new timings reg for "
2881 	    "channel %d 0x%x\n", chp->channel, sis_tim), DEBUG_PROBE);
2882 	pci_conf_write(sc->sc_pc, sc->sc_tag, SIS_TIM(chp->channel), sis_tim);
2883 	if (idedma_ctl != 0) {
2884 		/* Add software bits in status register */
2885 		bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
2886 		    IDEDMA_CTL, idedma_ctl);
2887 	}
2888 	pciide_print_modes(cp);
2889 }
2890 
2891 int
2892 acer_isabr_match(pa)
2893 	struct pci_attach_args *pa;
2894 {
2895 	if (PCI_VENDOR(pa->pa_id) == PCI_VENDOR_ALI &&
2896 	   PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_ALI_M1543)
2897 		return 1;
2898 	return 0;
2899 }
2900 
2901 
2902 void
2903 acer_chip_map(sc, pa)
2904 	struct pciide_softc *sc;
2905 	struct pci_attach_args *pa;
2906 {
2907 	struct pci_attach_args isa_pa;
2908 	struct pciide_channel *cp;
2909 	int channel;
2910 	pcireg_t cr, interface;
2911 	bus_size_t cmdsize, ctlsize;
2912 	pcireg_t rev = PCI_REVISION(pa->pa_class);
2913 
2914 	if (pciide_chipen(sc, pa) == 0)
2915 		return;
2916 	printf("%s: bus-master DMA support present",
2917 	    sc->sc_wdcdev.sc_dev.dv_xname);
2918 	pciide_mapreg_dma(sc, pa);
2919 	printf("\n");
2920 	sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
2921 	    WDC_CAPABILITY_MODE;
2922 	if (sc->sc_dma_ok) {
2923 		sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA;
2924 		if (rev >= 0x20) {
2925 			sc->sc_wdcdev.cap |= WDC_CAPABILITY_UDMA;
2926 			if (rev >= 0xC4)
2927 				sc->sc_wdcdev.UDMA_cap = 5;
2928 			else if (rev >= 0xC2)
2929 				sc->sc_wdcdev.UDMA_cap = 4;
2930 			else
2931 				sc->sc_wdcdev.UDMA_cap = 2;
2932 		}
2933 		sc->sc_wdcdev.cap |= WDC_CAPABILITY_IRQACK;
2934 		sc->sc_wdcdev.irqack = pciide_irqack;
2935 	}
2936 
2937 	sc->sc_wdcdev.PIO_cap = 4;
2938 	sc->sc_wdcdev.DMA_cap = 2;
2939 	sc->sc_wdcdev.set_modes = acer_setup_channel;
2940 	sc->sc_wdcdev.channels = sc->wdc_chanarray;
2941 	sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
2942 
2943 	pciide_pci_write(sc->sc_pc, sc->sc_tag, ACER_CDRC,
2944 	    (pciide_pci_read(sc->sc_pc, sc->sc_tag, ACER_CDRC) |
2945 		ACER_CDRC_DMA_EN) & ~ACER_CDRC_FIFO_DISABLE);
2946 
2947 	/* Enable "microsoft register bits" R/W. */
2948 	pciide_pci_write(sc->sc_pc, sc->sc_tag, ACER_CCAR3,
2949 	    pciide_pci_read(sc->sc_pc, sc->sc_tag, ACER_CCAR3) | ACER_CCAR3_PI);
2950 	pciide_pci_write(sc->sc_pc, sc->sc_tag, ACER_CCAR1,
2951 	    pciide_pci_read(sc->sc_pc, sc->sc_tag, ACER_CCAR1) &
2952 	    ~(ACER_CHANSTATUS_RO|PCIIDE_CHAN_RO(0)|PCIIDE_CHAN_RO(1)));
2953 	pciide_pci_write(sc->sc_pc, sc->sc_tag, ACER_CCAR2,
2954 	    pciide_pci_read(sc->sc_pc, sc->sc_tag, ACER_CCAR2) &
2955 	    ~ACER_CHANSTATUSREGS_RO);
2956 	cr = pci_conf_read(sc->sc_pc, sc->sc_tag, PCI_CLASS_REG);
2957 	cr |= (PCIIDE_CHANSTATUS_EN << PCI_INTERFACE_SHIFT);
2958 	pci_conf_write(sc->sc_pc, sc->sc_tag, PCI_CLASS_REG, cr);
2959 	/* Don't use cr, re-read the real register content instead */
2960 	interface = PCI_INTERFACE(pci_conf_read(sc->sc_pc, sc->sc_tag,
2961 	    PCI_CLASS_REG));
2962 
2963 	/* From linux: enable "Cable Detection" */
2964 	if (rev >= 0xC2) {
2965 		pciide_pci_write(sc->sc_pc, sc->sc_tag, ACER_0x4B,
2966 		    pciide_pci_read(sc->sc_pc, sc->sc_tag, ACER_0x4B)
2967 		    | ACER_0x4B_CDETECT);
2968 		/* set south-bridge's enable bit, m1533, 0x79 */
2969 		if (pci_find_device(&isa_pa, acer_isabr_match) == 0) {
2970 			printf("%s: can't find PCI/ISA bridge, downgrading "
2971 			    "to Ultra/33\n", sc->sc_wdcdev.sc_dev.dv_xname);
2972 			sc->sc_wdcdev.UDMA_cap = 2;
2973 		} else {
2974 			if (rev == 0xC2)
2975 				/* 1543C-B0 (m1533, 0x79, bit 2) */
2976 				pciide_pci_write(isa_pa.pa_pc, isa_pa.pa_tag,
2977 				    ACER_0x79,
2978 				    pciide_pci_read(isa_pa.pa_pc, isa_pa.pa_tag,
2979 					ACER_0x79)
2980 				    | ACER_0x79_REVC2_EN);
2981 			else
2982 				/* 1553/1535 (m1533, 0x79, bit 1) */
2983 				pciide_pci_write(isa_pa.pa_pc, isa_pa.pa_tag,
2984 				    ACER_0x79,
2985 				    pciide_pci_read(isa_pa.pa_pc, isa_pa.pa_tag,
2986 					ACER_0x79)
2987 				    | ACER_0x79_EN);
2988 		}
2989 	}
2990 
2991 	for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
2992 		cp = &sc->pciide_channels[channel];
2993 		if (pciide_chansetup(sc, channel, interface) == 0)
2994 			continue;
2995 		if ((interface & PCIIDE_CHAN_EN(channel)) == 0) {
2996 			printf("%s: %s channel ignored (disabled)\n",
2997 			    sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
2998 			continue;
2999 		}
3000 		/* newer controllers seem to lack the ACER_CHIDS. Sigh */
3001 		pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize,
3002 		     (rev >= 0xC2) ? pciide_pci_intr : acer_pci_intr);
3003 		if (cp->hw_ok == 0)
3004 			continue;
3005 		if (pciide_chan_candisable(cp)) {
3006 			cr &= ~(PCIIDE_CHAN_EN(channel) << PCI_INTERFACE_SHIFT);
3007 			pci_conf_write(sc->sc_pc, sc->sc_tag,
3008 			    PCI_CLASS_REG, cr);
3009 		}
3010 		pciide_map_compat_intr(pa, cp, channel, interface);
3011 		acer_setup_channel(&cp->wdc_channel);
3012 	}
3013 }
3014 
3015 void
3016 acer_setup_channel(chp)
3017 	struct channel_softc *chp;
3018 {
3019 	struct ata_drive_datas *drvp;
3020 	int drive;
3021 	u_int32_t acer_fifo_udma;
3022 	u_int32_t idedma_ctl;
3023 	struct pciide_channel *cp = (struct pciide_channel*)chp;
3024 	struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
3025 
3026 	idedma_ctl = 0;
3027 	acer_fifo_udma = pci_conf_read(sc->sc_pc, sc->sc_tag, ACER_FTH_UDMA);
3028 	WDCDEBUG_PRINT(("acer_setup_channel: old fifo/udma reg 0x%x\n",
3029 	    acer_fifo_udma), DEBUG_PROBE);
3030 	/* setup DMA if needed */
3031 	pciide_channel_dma_setup(cp);
3032 
3033 	if ((chp->ch_drive[0].drive_flags | chp->ch_drive[1].drive_flags) &
3034 	    DRIVE_UDMA) { /* check for an 80-pin cable */
3035 		if (pciide_pci_read(sc->sc_pc, sc->sc_tag, ACER_0x4A) &
3036 		    ACER_0x4A_80PIN(chp->channel)) {
3037 			if (chp->ch_drive[0].UDMA_mode > 2)
3038 				chp->ch_drive[0].UDMA_mode = 2;
3039 			if (chp->ch_drive[1].UDMA_mode > 2)
3040 				chp->ch_drive[1].UDMA_mode = 2;
3041 		}
3042 	}
3043 
3044 	for (drive = 0; drive < 2; drive++) {
3045 		drvp = &chp->ch_drive[drive];
3046 		/* If no drive, skip */
3047 		if ((drvp->drive_flags & DRIVE) == 0)
3048 			continue;
3049 		WDCDEBUG_PRINT(("acer_setup_channel: old timings reg for "
3050 		    "channel %d drive %d 0x%x\n", chp->channel, drive,
3051 		    pciide_pci_read(sc->sc_pc, sc->sc_tag,
3052 		    ACER_IDETIM(chp->channel, drive))), DEBUG_PROBE);
3053 		/* clear FIFO/DMA mode */
3054 		acer_fifo_udma &= ~(ACER_FTH_OPL(chp->channel, drive, 0x3) |
3055 		    ACER_UDMA_EN(chp->channel, drive) |
3056 		    ACER_UDMA_TIM(chp->channel, drive, 0x7));
3057 
3058 		/* add timing values, setup DMA if needed */
3059 		if ((drvp->drive_flags & DRIVE_DMA) == 0 &&
3060 		    (drvp->drive_flags & DRIVE_UDMA) == 0) {
3061 			acer_fifo_udma |=
3062 			    ACER_FTH_OPL(chp->channel, drive, 0x1);
3063 			goto pio;
3064 		}
3065 
3066 		acer_fifo_udma |= ACER_FTH_OPL(chp->channel, drive, 0x2);
3067 		if (drvp->drive_flags & DRIVE_UDMA) {
3068 			/* use Ultra/DMA */
3069 			drvp->drive_flags &= ~DRIVE_DMA;
3070 			acer_fifo_udma |= ACER_UDMA_EN(chp->channel, drive);
3071 			acer_fifo_udma |=
3072 			    ACER_UDMA_TIM(chp->channel, drive,
3073 				acer_udma[drvp->UDMA_mode]);
3074 			/* XXX disable if one drive < UDMA3 ? */
3075 			if (drvp->UDMA_mode >= 3) {
3076 				pciide_pci_write(sc->sc_pc, sc->sc_tag,
3077 				    ACER_0x4B,
3078 				    pciide_pci_read(sc->sc_pc, sc->sc_tag,
3079 					ACER_0x4B) | ACER_0x4B_UDMA66);
3080 			}
3081 		} else {
3082 			/*
3083 			 * use Multiword DMA
3084 			 * Timings will be used for both PIO and DMA,
3085 			 * so adjust DMA mode if needed
3086 			 */
3087 			if (drvp->PIO_mode > (drvp->DMA_mode + 2))
3088 				drvp->PIO_mode = drvp->DMA_mode + 2;
3089 			if (drvp->DMA_mode + 2 > (drvp->PIO_mode))
3090 				drvp->DMA_mode = (drvp->PIO_mode > 2) ?
3091 				    drvp->PIO_mode - 2 : 0;
3092 			if (drvp->DMA_mode == 0)
3093 				drvp->PIO_mode = 0;
3094 		}
3095 		idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
3096 pio:		pciide_pci_write(sc->sc_pc, sc->sc_tag,
3097 		    ACER_IDETIM(chp->channel, drive),
3098 		    acer_pio[drvp->PIO_mode]);
3099 	}
3100 	WDCDEBUG_PRINT(("acer_setup_channel: new fifo/udma reg 0x%x\n",
3101 	    acer_fifo_udma), DEBUG_PROBE);
3102 	pci_conf_write(sc->sc_pc, sc->sc_tag, ACER_FTH_UDMA, acer_fifo_udma);
3103 	if (idedma_ctl != 0) {
3104 		/* Add software bits in status register */
3105 		bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
3106 		    IDEDMA_CTL, idedma_ctl);
3107 	}
3108 	pciide_print_modes(cp);
3109 }
3110 
3111 int
3112 acer_pci_intr(arg)
3113 	void *arg;
3114 {
3115 	struct pciide_softc *sc = arg;
3116 	struct pciide_channel *cp;
3117 	struct channel_softc *wdc_cp;
3118 	int i, rv, crv;
3119 	u_int32_t chids;
3120 
3121 	rv = 0;
3122 	chids = pciide_pci_read(sc->sc_pc, sc->sc_tag, ACER_CHIDS);
3123 	for (i = 0; i < sc->sc_wdcdev.nchannels; i++) {
3124 		cp = &sc->pciide_channels[i];
3125 		wdc_cp = &cp->wdc_channel;
3126 		/* If a compat channel, skip. */
3127 		if (cp->compat)
3128 			continue;
3129 		if (chids & ACER_CHIDS_INT(i)) {
3130 			crv = wdcintr(wdc_cp);
3131 			if (crv == 0)
3132 				printf("%s:%d: bogus intr\n",
3133 				    sc->sc_wdcdev.sc_dev.dv_xname, i);
3134 			else
3135 				rv = 1;
3136 		}
3137 	}
3138 	return rv;
3139 }
3140 
3141 void
3142 hpt_chip_map(sc, pa)
3143 	struct pciide_softc *sc;
3144 	struct pci_attach_args *pa;
3145 {
3146 	struct pciide_channel *cp;
3147 	int i, compatchan, revision;
3148 	pcireg_t interface;
3149 	bus_size_t cmdsize, ctlsize;
3150 
3151 	if (pciide_chipen(sc, pa) == 0)
3152 		return;
3153 	revision = PCI_REVISION(pa->pa_class);
3154 	printf(": Triones/Highpoint ");
3155 	if (revision == HPT370_REV)
3156 		printf("HPT370 IDE Controller\n");
3157 	else if (revision == HPT370A_REV)
3158 		printf("HPT370A IDE Controller\n");
3159 	else if (revision == HPT366_REV)
3160 		printf("HPT366 IDE Controller\n");
3161 	else
3162 		printf("unknown HPT IDE controller rev %d\n", revision);
3163 
3164 	/*
3165 	 * When the chip is in native mode it identifies itself as a
3166 	 * 'misc mass storage' device. Fake the interface in this case.
3167 	 */
3168 	if (PCI_SUBCLASS(pa->pa_class) == PCI_SUBCLASS_MASS_STORAGE_IDE) {
3169 		interface = PCI_INTERFACE(pa->pa_class);
3170 	} else {
3171 		interface = PCIIDE_INTERFACE_BUS_MASTER_DMA |
3172 		    PCIIDE_INTERFACE_PCI(0);
3173 		if (revision == HPT370_REV || revision == HPT370A_REV)
3174 			interface |= PCIIDE_INTERFACE_PCI(1);
3175 	}
3176 
3177 	printf("%s: bus-master DMA support present",
3178 		sc->sc_wdcdev.sc_dev.dv_xname);
3179 	pciide_mapreg_dma(sc, pa);
3180 	printf("\n");
3181 	sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
3182 	    WDC_CAPABILITY_MODE;
3183 	if (sc->sc_dma_ok) {
3184 		sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_UDMA;
3185 		sc->sc_wdcdev.cap |= WDC_CAPABILITY_IRQACK;
3186 		sc->sc_wdcdev.irqack = pciide_irqack;
3187 	}
3188 	sc->sc_wdcdev.PIO_cap = 4;
3189 	sc->sc_wdcdev.DMA_cap = 2;
3190 
3191 	sc->sc_wdcdev.set_modes = hpt_setup_channel;
3192 	sc->sc_wdcdev.channels = sc->wdc_chanarray;
3193 	if (revision == HPT366_REV) {
3194 		sc->sc_wdcdev.UDMA_cap = 4;
3195 		/*
3196 		 * The 366 has 2 PCI IDE functions, one for primary and one
3197 		 * for secondary. So we need to call pciide_mapregs_compat()
3198 		 * with the real channel
3199 		 */
3200 		if (pa->pa_function == 0) {
3201 			compatchan = 0;
3202 		} else if (pa->pa_function == 1) {
3203 			compatchan = 1;
3204 		} else {
3205 			printf("%s: unexpected PCI function %d\n",
3206 			    sc->sc_wdcdev.sc_dev.dv_xname, pa->pa_function);
3207 			return;
3208 		}
3209 		sc->sc_wdcdev.nchannels = 1;
3210 	} else {
3211 		sc->sc_wdcdev.nchannels = 2;
3212 		sc->sc_wdcdev.UDMA_cap = 5;
3213 	}
3214 	for (i = 0; i < sc->sc_wdcdev.nchannels; i++) {
3215 		cp = &sc->pciide_channels[i];
3216 		if (sc->sc_wdcdev.nchannels > 1) {
3217 			compatchan = i;
3218 			if ((pciide_pci_read(sc->sc_pc, sc->sc_tag,
3219 			   HPT370_CTRL1(i)) & HPT370_CTRL1_EN) == 0) {
3220 				printf("%s: %s channel ignored (disabled)\n",
3221 				    sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
3222 				continue;
3223 			}
3224 		}
3225 		if (pciide_chansetup(sc, i, interface) == 0)
3226 			continue;
3227 		if (interface & PCIIDE_INTERFACE_PCI(i)) {
3228 			cp->hw_ok = pciide_mapregs_native(pa, cp, &cmdsize,
3229 			    &ctlsize, hpt_pci_intr);
3230 		} else {
3231 			cp->hw_ok = pciide_mapregs_compat(pa, cp, compatchan,
3232 			    &cmdsize, &ctlsize);
3233 		}
3234 		if (cp->hw_ok == 0)
3235 			return;
3236 		cp->wdc_channel.data32iot = cp->wdc_channel.cmd_iot;
3237 		cp->wdc_channel.data32ioh = cp->wdc_channel.cmd_ioh;
3238 		wdcattach(&cp->wdc_channel);
3239 		hpt_setup_channel(&cp->wdc_channel);
3240 	}
3241 	if (revision == HPT370_REV || revision == HPT370A_REV) {
3242 		/*
3243 		 * HPT370_REV has a bit to disable interrupts; make sure
3244 		 * to clear it.
3245 		 */
3246 		pciide_pci_write(sc->sc_pc, sc->sc_tag, HPT_CSEL,
3247 		    pciide_pci_read(sc->sc_pc, sc->sc_tag, HPT_CSEL) &
3248 		    ~HPT_CSEL_IRQDIS);
3249 	}
3250 	return;
3251 }
3252 
3253 void
3254 hpt_setup_channel(chp)
3255 	struct channel_softc *chp;
3256 {
3257 	struct ata_drive_datas *drvp;
3258 	int drive;
3259 	int cable;
3260 	u_int32_t before, after;
3261 	u_int32_t idedma_ctl;
3262 	struct pciide_channel *cp = (struct pciide_channel*)chp;
3263 	struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
3264 
3265 	cable = pciide_pci_read(sc->sc_pc, sc->sc_tag, HPT_CSEL);
3266 
3267 	/* setup DMA if needed */
3268 	pciide_channel_dma_setup(cp);
3269 
3270 	idedma_ctl = 0;
3271 
3272 	/* Per drive settings */
3273 	for (drive = 0; drive < 2; drive++) {
3274 		drvp = &chp->ch_drive[drive];
3275 		/* If no drive, skip */
3276 		if ((drvp->drive_flags & DRIVE) == 0)
3277 			continue;
3278 		before = pci_conf_read(sc->sc_pc, sc->sc_tag,
3279 					HPT_IDETIM(chp->channel, drive));
3280 
3281 		/* add timing values, setup DMA if needed */
3282 		if (drvp->drive_flags & DRIVE_UDMA) {
3283 			/* use Ultra/DMA */
3284 			drvp->drive_flags &= ~DRIVE_DMA;
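			/*
			 * The HPT_CSEL cable-detect bit being set seems to
			 * indicate a 40-wire cable, so cap the drive at
			 * UDMA mode 2 (Ultra/33) in that case.
			 */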
3285 			if ((cable & HPT_CSEL_CBLID(chp->channel)) != 0 &&
3286 			    drvp->UDMA_mode > 2)
3287 				drvp->UDMA_mode = 2;
3288 			after = (sc->sc_wdcdev.nchannels == 2) ?
3289 			    hpt370_udma[drvp->UDMA_mode] :
3290 			    hpt366_udma[drvp->UDMA_mode];
3291 			idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
3292 		} else if (drvp->drive_flags & DRIVE_DMA) {
3293 			/*
3294 			 * use Multiword DMA.
3295 			 * Timings will be used for both PIO and DMA, so adjust
3296 			 * DMA mode if needed
3297 			 */
3298 			if (drvp->PIO_mode >= 3 &&
3299 			    (drvp->DMA_mode + 2) > drvp->PIO_mode) {
3300 				drvp->DMA_mode = drvp->PIO_mode - 2;
3301 			}
3302 			after = (sc->sc_wdcdev.nchannels == 2) ?
3303 			    hpt370_dma[drvp->DMA_mode] :
3304 			    hpt366_dma[drvp->DMA_mode];
3305 			idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
3306 		} else {
3307 			/* PIO only */
3308 			after = (sc->sc_wdcdev.nchannels == 2) ?
3309 			    hpt370_pio[drvp->PIO_mode] :
3310 			    hpt366_pio[drvp->PIO_mode];
3311 		}
3312 		pci_conf_write(sc->sc_pc, sc->sc_tag,
3313 		    HPT_IDETIM(chp->channel, drive), after);
3314 		WDCDEBUG_PRINT(("%s: bus speed register set to 0x%08x "
3315 		    "(BIOS 0x%08x)\n", drvp->drv_softc->dv_xname,
3316 		    after, before), DEBUG_PROBE);
3317 	}
3318 	if (idedma_ctl != 0) {
3319 		/* Add software bits in status register */
3320 		bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
3321 		    IDEDMA_CTL, idedma_ctl);
3322 	}
3323 	pciide_print_modes(cp);
3324 }
3325 
3326 int
3327 hpt_pci_intr(arg)
3328 	void *arg;
3329 {
3330 	struct pciide_softc *sc = arg;
3331 	struct pciide_channel *cp;
3332 	struct channel_softc *wdc_cp;
3333 	int rv = 0;
3334 	int dmastat, i, crv;
3335 
3336 	for (i = 0; i < sc->sc_wdcdev.nchannels; i++) {
3337 		dmastat = bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh,
3338 		    IDEDMA_CTL + IDEDMA_SCH_OFFSET * i);
3339 		if ((dmastat & IDEDMA_CTL_INTR) == 0)
3340 			continue;
3341 		cp = &sc->pciide_channels[i];
3342 		wdc_cp = &cp->wdc_channel;
3343 		crv = wdcintr(wdc_cp);
3344 		if (crv == 0) {
3345 			printf("%s:%d: bogus intr\n",
3346 			    sc->sc_wdcdev.sc_dev.dv_xname, i);
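			/*
			 * Ack the stray interrupt: the IDEDMA_CTL interrupt
			 * bit is write-one-to-clear, so writing back the
			 * status we just read clears it.
			 */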
3347 			bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
3348 			    IDEDMA_CTL + IDEDMA_SCH_OFFSET * i, dmastat);
3349 		} else
3350 			rv = 1;
3351 	}
3352 	return rv;
3353 }
3354 
3355 
3356 /* Macros to test product */
3357 #define PDC_IS_262(sc)							\
3358 	((sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA66 ||	\
3359 	(sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA100 ||	\
3360 	(sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA100X)
3361 #define PDC_IS_265(sc)							\
3362 	((sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA100 ||	\
3363 	(sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA100X)
3364 
3365 void
3366 pdc202xx_chip_map(sc, pa)
3367 	struct pciide_softc *sc;
3368 	struct pci_attach_args *pa;
3369 {
3370 	struct pciide_channel *cp;
3371 	int channel;
3372 	pcireg_t interface, st, mode;
3373 	bus_size_t cmdsize, ctlsize;
3374 
3375 	st = pci_conf_read(sc->sc_pc, sc->sc_tag, PDC2xx_STATE);
3376 	WDCDEBUG_PRINT(("pdc202xx_setup_chip: controller state 0x%x\n", st),
3377 	    DEBUG_PROBE);
3378 	if (pciide_chipen(sc, pa) == 0)
3379 		return;
3380 
3381 	/* turn off RAID mode */
3382 	st &= ~PDC2xx_STATE_IDERAID;
3383 
3384 	/*
3385 	 * We can't rely on the PCI_CLASS_REG content if the chip was in RAID
3386 	 * mode; we have to fake the interface value.
3387 	 */
3388 	interface = PCIIDE_INTERFACE_SETTABLE(0) | PCIIDE_INTERFACE_SETTABLE(1);
3389 	if (st & PDC2xx_STATE_NATIVE)
3390 		interface |= PCIIDE_INTERFACE_PCI(0) | PCIIDE_INTERFACE_PCI(1);
3391 
3392 	printf("%s: bus-master DMA support present",
3393 	    sc->sc_wdcdev.sc_dev.dv_xname);
3394 	pciide_mapreg_dma(sc, pa);
3395 	printf("\n");
3396 	sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
3397 	    WDC_CAPABILITY_MODE;
3398 	if (sc->sc_dma_ok) {
3399 		sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_UDMA;
3400 		sc->sc_wdcdev.cap |= WDC_CAPABILITY_IRQACK;
3401 		sc->sc_wdcdev.irqack = pciide_irqack;
3402 	}
3403 	sc->sc_wdcdev.PIO_cap = 4;
3404 	sc->sc_wdcdev.DMA_cap = 2;
3405 	if (PDC_IS_265(sc))
3406 		sc->sc_wdcdev.UDMA_cap = 5;
3407 	else if (PDC_IS_262(sc))
3408 		sc->sc_wdcdev.UDMA_cap = 4;
3409 	else
3410 		sc->sc_wdcdev.UDMA_cap = 2;
3411 	sc->sc_wdcdev.set_modes = pdc202xx_setup_channel;
3412 	sc->sc_wdcdev.channels = sc->wdc_chanarray;
3413 	sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
3414 
3415 	/* setup failsafe defaults */
3416 	mode = 0;
3417 	mode = PDC2xx_TIM_SET_PA(mode, pdc2xx_pa[0]);
3418 	mode = PDC2xx_TIM_SET_PB(mode, pdc2xx_pb[0]);
3419 	mode = PDC2xx_TIM_SET_MB(mode, pdc2xx_dma_mb[0]);
3420 	mode = PDC2xx_TIM_SET_MC(mode, pdc2xx_dma_mc[0]);
3421 	for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
3422 		WDCDEBUG_PRINT(("pdc202xx_setup_chip: channel %d drive 0 "
3423 		    "initial timings  0x%x, now 0x%x\n", channel,
3424 		    pci_conf_read(sc->sc_pc, sc->sc_tag,
3425 		    PDC2xx_TIM(channel, 0)), mode | PDC2xx_TIM_IORDYp),
3426 		    DEBUG_PROBE);
3427 		pci_conf_write(sc->sc_pc, sc->sc_tag, PDC2xx_TIM(channel, 0),
3428 		    mode | PDC2xx_TIM_IORDYp);
3429 		WDCDEBUG_PRINT(("pdc202xx_setup_chip: channel %d drive 1 "
3430 		    "initial timings  0x%x, now 0x%x\n", channel,
3431 		    pci_conf_read(sc->sc_pc, sc->sc_tag,
3432 		    PDC2xx_TIM(channel, 1)), mode), DEBUG_PROBE);
3433 		pci_conf_write(sc->sc_pc, sc->sc_tag, PDC2xx_TIM(channel, 1),
3434 		    mode);
3435 	}
3436 
3437 	mode = PDC2xx_SCR_DMA;
3438 	if (PDC_IS_262(sc)) {
3439 		mode = PDC2xx_SCR_SET_GEN(mode, PDC262_SCR_GEN_LAT);
3440 	} else {
3441 		/* the BIOS set it up this way */
3442 		mode = PDC2xx_SCR_SET_GEN(mode, 0x1);
3443 	}
3444 	mode = PDC2xx_SCR_SET_I2C(mode, 0x3); /* ditto */
3445 	mode = PDC2xx_SCR_SET_POLL(mode, 0x1); /* ditto */
3446 	WDCDEBUG_PRINT(("pdc202xx_setup_chip: initial SCR  0x%x, now 0x%x\n",
3447 	    bus_space_read_4(sc->sc_dma_iot, sc->sc_dma_ioh, PDC2xx_SCR), mode),
3448 	    DEBUG_PROBE);
3449 	bus_space_write_4(sc->sc_dma_iot, sc->sc_dma_ioh, PDC2xx_SCR, mode);
3450 
3451 	/* controller initial state register is OK even without BIOS */
3452 	/* Set DMA mode to IDE DMA compatibility */
3453 	mode = bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh, PDC2xx_PM);
3454 	WDCDEBUG_PRINT(("pdc202xx_setup_chip: primary mode 0x%x", mode ),
3455 	    DEBUG_PROBE);
3456 	bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh, PDC2xx_PM,
3457 	    mode | 0x1);
3458 	mode = bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh, PDC2xx_SM);
3459 	WDCDEBUG_PRINT((", secondary mode 0x%x\n", mode ), DEBUG_PROBE);
3460 	bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh, PDC2xx_SM,
3461 	    mode | 0x1);
3462 
3463 	for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
3464 		cp = &sc->pciide_channels[channel];
3465 		if (pciide_chansetup(sc, channel, interface) == 0)
3466 			continue;
3467 		if ((st & (PDC_IS_262(sc) ?
3468 		    PDC262_STATE_EN(channel):PDC246_STATE_EN(channel))) == 0) {
3469 			printf("%s: %s channel ignored (disabled)\n",
3470 			    sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
3471 			continue;
3472 		}
3473 		if (PDC_IS_265(sc))
3474 			pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize,
3475 			    pdc20265_pci_intr);
3476 		else
3477 			pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize,
3478 			    pdc202xx_pci_intr);
3479 		if (cp->hw_ok == 0)
3480 			continue;
3481 		if (pciide_chan_candisable(cp))
3482 			st &= ~(PDC_IS_262(sc) ?
3483 			    PDC262_STATE_EN(channel):PDC246_STATE_EN(channel));
3484 		pciide_map_compat_intr(pa, cp, channel, interface);
3485 		pdc202xx_setup_channel(&cp->wdc_channel);
3486 	}
3487 	WDCDEBUG_PRINT(("pdc202xx_setup_chip: new controller state 0x%x\n", st),
3488 	    DEBUG_PROBE);
3489 	pci_conf_write(sc->sc_pc, sc->sc_tag, PDC2xx_STATE, st);
3490 	return;
3491 }
3492 
3493 void
3494 pdc202xx_setup_channel(chp)
3495 	struct channel_softc *chp;
3496 {
3497 	struct ata_drive_datas *drvp;
3498 	int drive;
3499 	pcireg_t mode, st;
3500 	u_int32_t idedma_ctl, scr, atapi;
3501 	struct pciide_channel *cp = (struct pciide_channel*)chp;
3502 	struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
3503 	int channel = chp->channel;
3504 
3505 	/* setup DMA if needed */
3506 	pciide_channel_dma_setup(cp);
3507 
3508 	idedma_ctl = 0;
3509 	WDCDEBUG_PRINT(("pdc202xx_setup_channel %s: scr 0x%x\n",
3510 	    sc->sc_wdcdev.sc_dev.dv_xname,
3511 	    bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh, PDC262_U66)),
3512 	    DEBUG_PROBE);
3513 
3514 	/* Per channel settings */
3515 	if (PDC_IS_262(sc)) {
3516 		scr = bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh,
3517 		    PDC262_U66);
3518 		st = pci_conf_read(sc->sc_pc, sc->sc_tag, PDC2xx_STATE);
3519 		/* Trim UDMA mode */
3520 		if ((st & PDC262_STATE_80P(channel)) != 0 ||
3521 		    (chp->ch_drive[0].drive_flags & DRIVE_UDMA &&
3522 		    chp->ch_drive[0].UDMA_mode <= 2) ||
3523 		    (chp->ch_drive[1].drive_flags & DRIVE_UDMA &&
3524 		    chp->ch_drive[1].UDMA_mode <= 2)) {
3525 			if (chp->ch_drive[0].UDMA_mode > 2)
3526 				chp->ch_drive[0].UDMA_mode = 2;
3527 			if (chp->ch_drive[1].UDMA_mode > 2)
3528 				chp->ch_drive[1].UDMA_mode = 2;
3529 		}
3530 		/* Set U66 if needed */
3531 		if ((chp->ch_drive[0].drive_flags & DRIVE_UDMA &&
3532 		    chp->ch_drive[0].UDMA_mode > 2) ||
3533 		    (chp->ch_drive[1].drive_flags & DRIVE_UDMA &&
3534 		    chp->ch_drive[1].UDMA_mode > 2))
3535 			scr |= PDC262_U66_EN(channel);
3536 		else
3537 			scr &= ~PDC262_U66_EN(channel);
3538 		bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
3539 		    PDC262_U66, scr);
3540 		WDCDEBUG_PRINT(("pdc202xx_setup_channel %s:%d: ATAPI 0x%x\n",
3541 		    sc->sc_wdcdev.sc_dev.dv_xname, channel,
3542 		    bus_space_read_4(sc->sc_dma_iot, sc->sc_dma_ioh,
3543 		    PDC262_ATAPI(channel))), DEBUG_PROBE);
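		/*
		 * Pick the channel's ATAPI UDMA setting: if one drive is
		 * Ultra/DMA and the other only does multiword DMA, leave it
		 * clear, otherwise set PDC262_ATAPI_UDMA.
		 */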
3544 		if (chp->ch_drive[0].drive_flags & DRIVE_ATAPI ||
3545 			chp->ch_drive[1].drive_flags & DRIVE_ATAPI) {
3546 			if (((chp->ch_drive[0].drive_flags & DRIVE_UDMA) &&
3547 			    !(chp->ch_drive[1].drive_flags & DRIVE_UDMA) &&
3548 			    (chp->ch_drive[1].drive_flags & DRIVE_DMA)) ||
3549 			    ((chp->ch_drive[1].drive_flags & DRIVE_UDMA) &&
3550 			    !(chp->ch_drive[0].drive_flags & DRIVE_UDMA) &&
3551 			    (chp->ch_drive[0].drive_flags & DRIVE_DMA)))
3552 				atapi = 0;
3553 			else
3554 				atapi = PDC262_ATAPI_UDMA;
3555 			bus_space_write_4(sc->sc_dma_iot, sc->sc_dma_ioh,
3556 			    PDC262_ATAPI(channel), atapi);
3557 		}
3558 	}
3559 	for (drive = 0; drive < 2; drive++) {
3560 		drvp = &chp->ch_drive[drive];
3561 		/* If no drive, skip */
3562 		if ((drvp->drive_flags & DRIVE) == 0)
3563 			continue;
3564 		mode = 0;
3565 		if (drvp->drive_flags & DRIVE_UDMA) {
3566 			/* use Ultra/DMA */
3567 			drvp->drive_flags &= ~DRIVE_DMA;
3568 			mode = PDC2xx_TIM_SET_MB(mode,
3569 			    pdc2xx_udma_mb[drvp->UDMA_mode]);
3570 			mode = PDC2xx_TIM_SET_MC(mode,
3571 			    pdc2xx_udma_mc[drvp->UDMA_mode]);
3573 			idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
3574 		} else if (drvp->drive_flags & DRIVE_DMA) {
3575 			mode = PDC2xx_TIM_SET_MB(mode,
3576 			    pdc2xx_dma_mb[drvp->DMA_mode]);
3577 			mode = PDC2xx_TIM_SET_MC(mode,
3578 			    pdc2xx_dma_mc[drvp->DMA_mode]);
3579 			idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
3580 		} else {
3581 			mode = PDC2xx_TIM_SET_MB(mode,
3582 			    pdc2xx_dma_mb[0]);
3583 			mode = PDC2xx_TIM_SET_MC(mode,
3584 			    pdc2xx_dma_mc[0]);
3585 		}
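		/*
		 * PA/PB carry the PIO timing for this drive.  PDC2xx_TIM_PRE
		 * (presumably read prefetch) is only set for plain ATA
		 * disks, and IORDY is honored for PIO mode 3 and up.
		 */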
3586 		mode = PDC2xx_TIM_SET_PA(mode, pdc2xx_pa[drvp->PIO_mode]);
3587 		mode = PDC2xx_TIM_SET_PB(mode, pdc2xx_pb[drvp->PIO_mode]);
3588 		if (drvp->drive_flags & DRIVE_ATA)
3589 			mode |= PDC2xx_TIM_PRE;
3590 		mode |= PDC2xx_TIM_SYNC | PDC2xx_TIM_ERRDY;
3591 		if (drvp->PIO_mode >= 3) {
3592 			mode |= PDC2xx_TIM_IORDY;
3593 			if (drive == 0)
3594 				mode |= PDC2xx_TIM_IORDYp;
3595 		}
3596 		WDCDEBUG_PRINT(("pdc202xx_setup_channel: %s:%d:%d "
3597 		    "timings 0x%x\n",
3598 		    sc->sc_wdcdev.sc_dev.dv_xname,
3599 		    chp->channel, drive, mode), DEBUG_PROBE);
3600 		pci_conf_write(sc->sc_pc, sc->sc_tag,
3601 		    PDC2xx_TIM(chp->channel, drive), mode);
3602 	}
3603 	if (idedma_ctl != 0) {
3604 		/* Add software bits in status register */
3605 		bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
3606 		    IDEDMA_CTL, idedma_ctl);
3607 	}
3608 	pciide_print_modes(cp);
3609 }
3610 
3611 int
3612 pdc202xx_pci_intr(arg)
3613 	void *arg;
3614 {
3615 	struct pciide_softc *sc = arg;
3616 	struct pciide_channel *cp;
3617 	struct channel_softc *wdc_cp;
3618 	int i, rv, crv;
3619 	u_int32_t scr;
3620 
3621 	rv = 0;
3622 	scr = bus_space_read_4(sc->sc_dma_iot, sc->sc_dma_ioh, PDC2xx_SCR);
3623 	for (i = 0; i < sc->sc_wdcdev.nchannels; i++) {
3624 		cp = &sc->pciide_channels[i];
3625 		wdc_cp = &cp->wdc_channel;
3626 		/* If a compat channel, skip. */
3627 		if (cp->compat)
3628 			continue;
3629 		if (scr & PDC2xx_SCR_INT(i)) {
3630 			crv = wdcintr(wdc_cp);
3631 			if (crv == 0)
3632 				printf("%s:%d: bogus intr (reg 0x%x)\n",
3633 				    sc->sc_wdcdev.sc_dev.dv_xname, i, scr);
3634 			else
3635 				rv = 1;
3636 		}
3637 	}
3638 	return rv;
3639 }
3640 
3641 int
3642 pdc20265_pci_intr(arg)
3643 	void *arg;
3644 {
3645 	struct pciide_softc *sc = arg;
3646 	struct pciide_channel *cp;
3647 	struct channel_softc *wdc_cp;
3648 	int i, rv, crv;
3649 	u_int32_t dmastat;
3650 
3651 	rv = 0;
3652 	for (i = 0; i < sc->sc_wdcdev.nchannels; i++) {
3653 		cp = &sc->pciide_channels[i];
3654 		wdc_cp = &cp->wdc_channel;
3655 		/* If a compat channel, skip. */
3656 		if (cp->compat)
3657 			continue;
3658 		/*
3659 		 * The Ultra/100 seems to assert PDC2xx_SCR_INT spuriously;
3660 		 * however, it asserts INT in IDEDMA_CTL even for non-DMA ops.
3661 		 * So use it instead (requires 2 reg reads instead of 1,
3662 		 * but we can't do it another way).
3663 		 */
3664 		dmastat = bus_space_read_1(sc->sc_dma_iot,
3665 		    sc->sc_dma_ioh, IDEDMA_CTL + IDEDMA_SCH_OFFSET * i);
3666 		if ((dmastat & IDEDMA_CTL_INTR) == 0)
3667 			continue;
3668 		crv = wdcintr(wdc_cp);
3669 		if (crv == 0)
3670 			printf("%s:%d: bogus intr\n",
3671 			    sc->sc_wdcdev.sc_dev.dv_xname, i);
3672 		else
3673 			rv = 1;
3674 	}
3675 	return rv;
3676 }
3677 
3678 void
3679 opti_chip_map(sc, pa)
3680 	struct pciide_softc *sc;
3681 	struct pci_attach_args *pa;
3682 {
3683 	struct pciide_channel *cp;
3684 	bus_size_t cmdsize, ctlsize;
3685 	pcireg_t interface;
3686 	u_int8_t init_ctrl;
3687 	int channel;
3688 
3689 	if (pciide_chipen(sc, pa) == 0)
3690 		return;
3691 	printf("%s: bus-master DMA support present",
3692 	    sc->sc_wdcdev.sc_dev.dv_xname);
3693 
3694 	/*
3695 	 * XXXSCW:
3696 	 * There seem to be a couple of buggy revisions/implementations
3697 	 * of the OPTi pciide chipset. This kludge seems to fix one of
3698 	 * the reported problems (PR/11644) but still fails for the
3699 	 * other (PR/13151), although the latter may be due to other
3700 	 * issues too...
3701 	 */
3702 	if (PCI_REVISION(pa->pa_class) <= 0x12) {
3703 		printf(" but disabled due to chip rev. <= 0x12");
3704 		sc->sc_dma_ok = 0;
3705 		sc->sc_wdcdev.cap = 0;
3706 	} else {
3707 		sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA32;
3708 		pciide_mapreg_dma(sc, pa);
3709 	}
3710 	printf("\n");
3711 
3712 	sc->sc_wdcdev.cap |= WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_MODE;
3713 	sc->sc_wdcdev.PIO_cap = 4;
3714 	if (sc->sc_dma_ok) {
3715 		sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_IRQACK;
3716 		sc->sc_wdcdev.irqack = pciide_irqack;
3717 		sc->sc_wdcdev.DMA_cap = 2;
3718 	}
3719 	sc->sc_wdcdev.set_modes = opti_setup_channel;
3720 
3721 	sc->sc_wdcdev.channels = sc->wdc_chanarray;
3722 	sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
3723 
3724 	init_ctrl = pciide_pci_read(sc->sc_pc, sc->sc_tag,
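	/*
	 * Read the chip's initialization control register; it is checked
	 * below to see whether the second channel has been disabled.
	 */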
3725 	    OPTI_REG_INIT_CONTROL);
3726 
3727 	interface = PCI_INTERFACE(pa->pa_class);
3728 
3729 	for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
3730 		cp = &sc->pciide_channels[channel];
3731 		if (pciide_chansetup(sc, channel, interface) == 0)
3732 			continue;
3733 		if (channel == 1 &&
3734 		    (init_ctrl & OPTI_INIT_CONTROL_CH2_DISABLE) != 0) {
3735 			printf("%s: %s channel ignored (disabled)\n",
3736 			    sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
3737 			continue;
3738 		}
3739 		pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize,
3740 		    pciide_pci_intr);
3741 		if (cp->hw_ok == 0)
3742 			continue;
3743 		pciide_map_compat_intr(pa, cp, channel, interface);
3744 		if (cp->hw_ok == 0)
3745 			continue;
3746 		opti_setup_channel(&cp->wdc_channel);
3747 	}
3748 }
3749 
3750 void
3751 opti_setup_channel(chp)
3752 	struct channel_softc *chp;
3753 {
3754 	struct ata_drive_datas *drvp;
3755 	struct pciide_channel *cp = (struct pciide_channel*)chp;
3756 	struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
3757 	int drive, spd;
3758 	int mode[2];
3759 	u_int8_t rv, mr;
3760 
3761 	/*
3762 	 * The `Delay' and `Address Setup Time' fields of the
3763 	 * Miscellaneous Register are always zero initially.
3764 	 */
3765 	mr = opti_read_config(chp, OPTI_REG_MISC) & ~OPTI_MISC_INDEX_MASK;
3766 	mr &= ~(OPTI_MISC_DELAY_MASK |
3767 		OPTI_MISC_ADDR_SETUP_MASK |
3768 		OPTI_MISC_INDEX_MASK);
3769 
3770 	/* Prime the control register before setting timing values */
3771 	opti_write_config(chp, OPTI_REG_CONTROL, OPTI_CONTROL_DISABLE);
3772 
3773 	/* Determine the clock rate of the PCI bus the chip is attached to */
3774 	spd = (int) opti_read_config(chp, OPTI_REG_STRAP);
3775 	spd &= OPTI_STRAP_PCI_SPEED_MASK;
3776 
3777 	/* setup DMA if needed */
3778 	pciide_channel_dma_setup(cp);
3779 
3780 	for (drive = 0; drive < 2; drive++) {
3781 		drvp = &chp->ch_drive[drive];
3782 		/* If no drive, skip */
3783 		if ((drvp->drive_flags & DRIVE) == 0) {
3784 			mode[drive] = -1;
3785 			continue;
3786 		}
3787 
3788 		if ((drvp->drive_flags & DRIVE_DMA)) {
3789 			/*
3790 			 * Timings will be used for both PIO and DMA,
3791 			 * so adjust DMA mode if needed
3792 			 */
3793 			if (drvp->PIO_mode > (drvp->DMA_mode + 2))
3794 				drvp->PIO_mode = drvp->DMA_mode + 2;
3795 			if (drvp->DMA_mode + 2 > (drvp->PIO_mode))
3796 				drvp->DMA_mode = (drvp->PIO_mode > 2) ?
3797 				    drvp->PIO_mode - 2 : 0;
3798 			if (drvp->DMA_mode == 0)
3799 				drvp->PIO_mode = 0;
3800 
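			/*
			 * DMA timings are assumed to follow the five PIO
			 * entries in the opti_tim_* tables, hence the
			 * DMA_mode + 5 index.
			 */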
3801 			mode[drive] = drvp->DMA_mode + 5;
3802 		} else
3803 			mode[drive] = drvp->PIO_mode;
3804 
3805 		if (drive && mode[0] >= 0 &&
3806 		    (opti_tim_as[spd][mode[0]] != opti_tim_as[spd][mode[1]])) {
3807 			/*
3808 			 * Can't have two drives using different values
3809 			 * for `Address Setup Time'.
3810 			 * Slow down the faster drive to compensate.
3811 			 */
3812 			int d = (opti_tim_as[spd][mode[0]] >
3813 				 opti_tim_as[spd][mode[1]]) ?  0 : 1;
3814 
3815 			mode[d] = mode[1-d];
3816 			chp->ch_drive[d].PIO_mode = chp->ch_drive[1-d].PIO_mode;
3817 			chp->ch_drive[d].DMA_mode = 0;
3818 			chp->ch_drive[d].drive_flags &= ~DRIVE_DMA;
3819 		}
3820 	}
3821 
3822 	for (drive = 0; drive < 2; drive++) {
3823 		int m;
3824 		if ((m = mode[drive]) < 0)
3825 			continue;
3826 
3827 		/* Set the Address Setup Time and select appropriate index */
3828 		rv = opti_tim_as[spd][m] << OPTI_MISC_ADDR_SETUP_SHIFT;
3829 		rv |= OPTI_MISC_INDEX(drive);
3830 		opti_write_config(chp, OPTI_REG_MISC, mr | rv);
3831 
3832 		/* Set the pulse width and recovery timing parameters */
3833 		rv  = opti_tim_cp[spd][m] << OPTI_PULSE_WIDTH_SHIFT;
3834 		rv |= opti_tim_rt[spd][m] << OPTI_RECOVERY_TIME_SHIFT;
3835 		opti_write_config(chp, OPTI_REG_READ_CYCLE_TIMING, rv);
3836 		opti_write_config(chp, OPTI_REG_WRITE_CYCLE_TIMING, rv);
3837 
3838 		/* Set the Enhanced Mode register appropriately */
3839 		rv = pciide_pci_read(sc->sc_pc, sc->sc_tag, OPTI_REG_ENH_MODE);
3840 		rv &= ~OPTI_ENH_MODE_MASK(chp->channel, drive);
3841 		rv |= OPTI_ENH_MODE(chp->channel, drive, opti_tim_em[m]);
3842 		pciide_pci_write(sc->sc_pc, sc->sc_tag, OPTI_REG_ENH_MODE, rv);
3843 	}
3844 
3845 	/* Finally, enable the timings */
3846 	opti_write_config(chp, OPTI_REG_CONTROL, OPTI_CONTROL_ENABLE);
3847 
3848 	pciide_print_modes(cp);
3849 }
3850 
3851 #define	ACARD_IS_850(sc)						\
3852 	((sc)->sc_pp->ide_product == PCI_PRODUCT_ACARD_ATP850U)
3853 
3854 void
3855 acard_chip_map(sc, pa)
3856 	struct pciide_softc *sc;
3857 	struct pci_attach_args *pa;
3858 {
3859 	struct pciide_channel *cp;
3860 	int i;
3861 	pcireg_t interface;
3862 	bus_size_t cmdsize, ctlsize;
3863 
3864 	if (pciide_chipen(sc, pa) == 0)
3865 		return;
3866 
3867 	/*
3868 	 * When the chip is in native mode it identifies itself as a
3869 	 * 'misc mass storage' device; fake the interface in this case.
3870 	 */
3871 	if (PCI_SUBCLASS(pa->pa_class) == PCI_SUBCLASS_MASS_STORAGE_IDE) {
3872 		interface = PCI_INTERFACE(pa->pa_class);
3873 	} else {
3874 		interface = PCIIDE_INTERFACE_BUS_MASTER_DMA |
3875 		    PCIIDE_INTERFACE_PCI(0) | PCIIDE_INTERFACE_PCI(1);
3876 	}
3877 
3878 	printf("%s: bus-master DMA support present",
3879 	    sc->sc_wdcdev.sc_dev.dv_xname);
3880 	pciide_mapreg_dma(sc, pa);
3881 	printf("\n");
3882 	sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
3883 	    WDC_CAPABILITY_MODE;
3884 
3885 	if (sc->sc_dma_ok) {
3886 		sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_UDMA;
3887 		sc->sc_wdcdev.cap |= WDC_CAPABILITY_IRQACK;
3888 		sc->sc_wdcdev.irqack = pciide_irqack;
3889 	}
3890 	sc->sc_wdcdev.PIO_cap = 4;
3891 	sc->sc_wdcdev.DMA_cap = 2;
3892 	sc->sc_wdcdev.UDMA_cap = ACARD_IS_850(sc) ? 2 : 4;
3893 
3894 	sc->sc_wdcdev.set_modes = acard_setup_channel;
3895 	sc->sc_wdcdev.channels = sc->wdc_chanarray;
3896 	sc->sc_wdcdev.nchannels = 2;
3897 
3898 	for (i = 0; i < sc->sc_wdcdev.nchannels; i++) {
3899 		cp = &sc->pciide_channels[i];
3900 		if (pciide_chansetup(sc, i, interface) == 0)
3901 			continue;
3902 		if (interface & PCIIDE_INTERFACE_PCI(i)) {
3903 			cp->hw_ok = pciide_mapregs_native(pa, cp, &cmdsize,
3904 			    &ctlsize, pciide_pci_intr);
3905 		} else {
3906 			cp->hw_ok = pciide_mapregs_compat(pa, cp, i,
3907 			    &cmdsize, &ctlsize);
3908 		}
3909 		if (cp->hw_ok == 0)
3910 			return;
3911 		cp->wdc_channel.data32iot = cp->wdc_channel.cmd_iot;
3912 		cp->wdc_channel.data32ioh = cp->wdc_channel.cmd_ioh;
3913 		wdcattach(&cp->wdc_channel);
3914 		acard_setup_channel(&cp->wdc_channel);
3915 	}
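	/*
	 * On the ATP860 and later, clear ATP860_CTRL_INT in the control
	 * register; this bit appears to mask the chip's interrupt output,
	 * so make sure it is off.
	 */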
3916 	if (!ACARD_IS_850(sc)) {
3917 		u_int32_t reg;
3918 		reg = pci_conf_read(sc->sc_pc, sc->sc_tag, ATP8x0_CTRL);
3919 		reg &= ~ATP860_CTRL_INT;
3920 		pci_conf_write(sc->sc_pc, sc->sc_tag, ATP8x0_CTRL, reg);
3921 	}
3922 }
3923 
3924 void
3925 acard_setup_channel(chp)
3926 	struct channel_softc *chp;
3927 {
3928 	struct ata_drive_datas *drvp;
3929 	struct pciide_channel *cp = (struct pciide_channel*)chp;
3930 	struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
3931 	int channel = chp->channel;
3932 	int drive;
3933 	u_int32_t idetime, udma_mode;
3934 	u_int32_t idedma_ctl;
3935 
3936 	/* setup DMA if needed */
3937 	pciide_channel_dma_setup(cp);
3938 
3939 	if (ACARD_IS_850(sc)) {
3940 		idetime = 0;
3941 		udma_mode = pci_conf_read(sc->sc_pc, sc->sc_tag, ATP850_UDMA);
3942 		udma_mode &= ~ATP850_UDMA_MASK(channel);
3943 	} else {
3944 		idetime = pci_conf_read(sc->sc_pc, sc->sc_tag, ATP860_IDETIME);
3945 		idetime &= ~ATP860_SETTIME_MASK(channel);
3946 		udma_mode = pci_conf_read(sc->sc_pc, sc->sc_tag, ATP860_UDMA);
3947 		udma_mode &= ~ATP860_UDMA_MASK(channel);
3948 
3949 		/* check for 80-pin cable */
3950 		if ((chp->ch_drive[0].drive_flags & DRIVE_UDMA) ||
3951 		    (chp->ch_drive[1].drive_flags & DRIVE_UDMA)) {
3952 			if (pci_conf_read(sc->sc_pc, sc->sc_tag, ATP8x0_CTRL)
3953 			    & ATP860_CTRL_80P(chp->channel)) {
3954 				if (chp->ch_drive[0].UDMA_mode > 2)
3955 					chp->ch_drive[0].UDMA_mode = 2;
3956 				if (chp->ch_drive[1].UDMA_mode > 2)
3957 					chp->ch_drive[1].UDMA_mode = 2;
3958 			}
3959 		}
3960 	}
3961 
3962 	idedma_ctl = 0;
3963 
3964 	/* Per drive settings */
3965 	for (drive = 0; drive < 2; drive++) {
3966 		drvp = &chp->ch_drive[drive];
3967 		/* If no drive, skip */
3968 		if ((drvp->drive_flags & DRIVE) == 0)
3969 			continue;
3970 		/* add timing values, setup DMA if needed */
3971 		if ((chp->wdc->cap & WDC_CAPABILITY_UDMA) &&
3972 		    (drvp->drive_flags & DRIVE_UDMA)) {
3973 			/* use Ultra/DMA */
3974 			if (ACARD_IS_850(sc)) {
3975 				idetime |= ATP850_SETTIME(drive,
3976 				    acard_act_udma[drvp->UDMA_mode],
3977 				    acard_rec_udma[drvp->UDMA_mode]);
3978 				udma_mode |= ATP850_UDMA_MODE(channel, drive,
3979 				    acard_udma_conf[drvp->UDMA_mode]);
3980 			} else {
3981 				idetime |= ATP860_SETTIME(channel, drive,
3982 				    acard_act_udma[drvp->UDMA_mode],
3983 				    acard_rec_udma[drvp->UDMA_mode]);
3984 				udma_mode |= ATP860_UDMA_MODE(channel, drive,
3985 				    acard_udma_conf[drvp->UDMA_mode]);
3986 			}
3987 			idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
3988 		} else if ((chp->wdc->cap & WDC_CAPABILITY_DMA) &&
3989 		    (drvp->drive_flags & DRIVE_DMA)) {
3990 			/* use Multiword DMA */
3991 			drvp->drive_flags &= ~DRIVE_UDMA;
3992 			if (ACARD_IS_850(sc)) {
3993 				idetime |= ATP850_SETTIME(drive,
3994 				    acard_act_dma[drvp->DMA_mode],
3995 				    acard_rec_dma[drvp->DMA_mode]);
3996 			} else {
3997 				idetime |= ATP860_SETTIME(channel, drive,
3998 				    acard_act_dma[drvp->DMA_mode],
3999 				    acard_rec_dma[drvp->DMA_mode]);
4000 			}
4001 			idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
4002 		} else {
4003 			/* PIO only */
4004 			drvp->drive_flags &= ~(DRIVE_UDMA | DRIVE_DMA);
4005 			if (ACARD_IS_850(sc)) {
4006 				idetime |= ATP850_SETTIME(drive,
4007 				    acard_act_pio[drvp->PIO_mode],
4008 				    acard_rec_pio[drvp->PIO_mode]);
4009 			} else {
4010 				idetime |= ATP860_SETTIME(channel, drive,
4011 				    acard_act_pio[drvp->PIO_mode],
4012 				    acard_rec_pio[drvp->PIO_mode]);
4013 			}
4014 			pci_conf_write(sc->sc_pc, sc->sc_tag, ATP8x0_CTRL,
4015 			    pci_conf_read(sc->sc_pc, sc->sc_tag, ATP8x0_CTRL)
4016 			    | ATP8x0_CTRL_EN(channel));
4017 		}
4018 	}
4019 
4020 	if (idedma_ctl != 0) {
4021 		/* Add software bits in status register */
4022 		bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
4023 		    IDEDMA_CTL + IDEDMA_SCH_OFFSET * channel, idedma_ctl);
4024 	}
4025 	pciide_print_modes(cp);
4026 
4027 	if (ACARD_IS_850(sc)) {
4028 		pci_conf_write(sc->sc_pc, sc->sc_tag,
4029 		    ATP850_IDETIME(channel), idetime);
4030 		pci_conf_write(sc->sc_pc, sc->sc_tag, ATP850_UDMA, udma_mode);
4031 	} else {
4032 		pci_conf_write(sc->sc_pc, sc->sc_tag, ATP860_IDETIME, idetime);
4033 		pci_conf_write(sc->sc_pc, sc->sc_tag, ATP860_UDMA, udma_mode);
4034 	}
4035 }
4036 
4037 int
4038 acard_pci_intr(arg)
4039 	void *arg;
4040 {
4041 	struct pciide_softc *sc = arg;
4042 	struct pciide_channel *cp;
4043 	struct channel_softc *wdc_cp;
4044 	int rv = 0;
4045 	int dmastat, i, crv;
4046 
4047 	for (i = 0; i < sc->sc_wdcdev.nchannels; i++) {
4048 		dmastat = bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh,
4049 		    IDEDMA_CTL + IDEDMA_SCH_OFFSET * i);
4050 		if ((dmastat & IDEDMA_CTL_INTR) == 0)
4051 			continue;
4052 		cp = &sc->pciide_channels[i];
4053 		wdc_cp = &cp->wdc_channel;
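		/*
		 * If nobody is waiting for an interrupt on this channel,
		 * treat it as unexpected: let wdcintr() have a look and ack
		 * the DMA status so a spurious interrupt does not keep
		 * firing.
		 */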
4054 		if ((wdc_cp->ch_flags & WDCF_IRQ_WAIT) == 0) {
4055 			(void)wdcintr(wdc_cp);
4056 			bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
4057 			    IDEDMA_CTL + IDEDMA_SCH_OFFSET * i, dmastat);
4058 			continue;
4059 		}
4060 		crv = wdcintr(wdc_cp);
4061 		if (crv == 0)
4062 			printf("%s:%d: bogus intr\n",
4063 			    sc->sc_wdcdev.sc_dev.dv_xname, i);
4064 		else if (crv == 1)
4065 			rv = 1;
4066 		else if (rv == 0)
4067 			rv = crv;
4068 	}
4069 	return rv;
4070 }
4071