xref: /netbsd-src/sys/dev/pci/pciide.c (revision 21a3d2f02241c56556f4b2305ef1b8036f268f70)
1 /*	$NetBSD: pciide.c,v 1.131 2001/10/16 08:22:50 tron Exp $	*/
2 
3 
4 /*
5  * Copyright (c) 1999, 2000, 2001 Manuel Bouyer.
6  *
7  * Redistribution and use in source and binary forms, with or without
8  * modification, are permitted provided that the following conditions
9  * are met:
10  * 1. Redistributions of source code must retain the above copyright
11  *    notice, this list of conditions and the following disclaimer.
12  * 2. Redistributions in binary form must reproduce the above copyright
13  *    notice, this list of conditions and the following disclaimer in the
14  *    documentation and/or other materials provided with the distribution.
15  * 3. All advertising materials mentioning features or use of this software
16  *    must display the following acknowledgement:
17  *	This product includes software developed by the University of
18  *	California, Berkeley and its contributors.
19  * 4. Neither the name of the University nor the names of its contributors
20  *    may be used to endorse or promote products derived from this software
21  *    without specific prior written permission.
22  *
23  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
24  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
25  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
26  * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
27  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
28  * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
29  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
30  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
31  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
32  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
33  *
34  */
35 
36 
37 /*
38  * Copyright (c) 1996, 1998 Christopher G. Demetriou.  All rights reserved.
39  *
40  * Redistribution and use in source and binary forms, with or without
41  * modification, are permitted provided that the following conditions
42  * are met:
43  * 1. Redistributions of source code must retain the above copyright
44  *    notice, this list of conditions and the following disclaimer.
45  * 2. Redistributions in binary form must reproduce the above copyright
46  *    notice, this list of conditions and the following disclaimer in the
47  *    documentation and/or other materials provided with the distribution.
48  * 3. All advertising materials mentioning features or use of this software
49  *    must display the following acknowledgement:
50  *      This product includes software developed by Christopher G. Demetriou
51  *	for the NetBSD Project.
52  * 4. The name of the author may not be used to endorse or promote products
53  *    derived from this software without specific prior written permission
54  *
55  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
56  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
57  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
58  * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
59  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
60  * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
61  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
62  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
63  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
64  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
65  */
66 
67 /*
68  * PCI IDE controller driver.
69  *
70  * Author: Christopher G. Demetriou, March 2, 1998 (derived from NetBSD
71  * sys/dev/pci/ppb.c, revision 1.16).
72  *
73  * See "PCI IDE Controller Specification, Revision 1.0 3/4/94" and
74  * "Programming Interface for Bus Master IDE Controller, Revision 1.0
75  * 5/16/94" from the PCI SIG.
76  *
77  */
78 
79 #ifndef WDCDEBUG
80 #define WDCDEBUG
81 #endif
82 
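/*
 * Debug categories for WDCDEBUG_PRINT() below; set the corresponding bits
 * in wdcdebug_pciide_mask (e.g. from the kernel debugger) to enable their
 * output.
 */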
83 #define DEBUG_DMA   0x01
84 #define DEBUG_XFERS  0x02
85 #define DEBUG_FUNCS  0x08
86 #define DEBUG_PROBE  0x10
87 #ifdef WDCDEBUG
88 int wdcdebug_pciide_mask = 0;
89 #define WDCDEBUG_PRINT(args, level) \
90 	if (wdcdebug_pciide_mask & (level)) printf args
91 #else
92 #define WDCDEBUG_PRINT(args, level)
93 #endif
94 #include <sys/param.h>
95 #include <sys/systm.h>
96 #include <sys/device.h>
97 #include <sys/malloc.h>
98 
99 #include <uvm/uvm_extern.h>
100 
101 #include <machine/endian.h>
102 
103 #include <dev/pci/pcireg.h>
104 #include <dev/pci/pcivar.h>
105 #include <dev/pci/pcidevs.h>
106 #include <dev/pci/pciidereg.h>
107 #include <dev/pci/pciidevar.h>
108 #include <dev/pci/pciide_piix_reg.h>
109 #include <dev/pci/pciide_amd_reg.h>
110 #include <dev/pci/pciide_apollo_reg.h>
111 #include <dev/pci/pciide_cmd_reg.h>
112 #include <dev/pci/pciide_cy693_reg.h>
113 #include <dev/pci/pciide_sis_reg.h>
114 #include <dev/pci/pciide_acer_reg.h>
115 #include <dev/pci/pciide_pdc202xx_reg.h>
116 #include <dev/pci/pciide_opti_reg.h>
117 #include <dev/pci/pciide_hpt_reg.h>
118 #include <dev/pci/pciide_acard_reg.h>
119 #include <dev/pci/cy82c693var.h>
120 
121 #include "opt_pciide.h"
122 
123 /* inlines for reading/writing 8-bit PCI registers */
124 static __inline u_int8_t pciide_pci_read __P((pci_chipset_tag_t, pcitag_t,
125 					      int));
126 static __inline void pciide_pci_write __P((pci_chipset_tag_t, pcitag_t,
127 					   int, u_int8_t));
128 
129 static __inline u_int8_t
130 pciide_pci_read(pc, pa, reg)
131 	pci_chipset_tag_t pc;
132 	pcitag_t pa;
133 	int reg;
134 {
135 
136 	return (pci_conf_read(pc, pa, (reg & ~0x03)) >>
137 	    ((reg & 0x03) * 8) & 0xff);
138 }
139 
140 static __inline void
141 pciide_pci_write(pc, pa, reg, val)
142 	pci_chipset_tag_t pc;
143 	pcitag_t pa;
144 	int reg;
145 	u_int8_t val;
146 {
147 	pcireg_t pcival;
148 
149 	pcival = pci_conf_read(pc, pa, (reg & ~0x03));
150 	pcival &= ~(0xff << ((reg & 0x03) * 8));
151 	pcival |= (val << ((reg & 0x03) * 8));
152 	pci_conf_write(pc, pa, (reg & ~0x03), pcival);
153 }
154 
155 void default_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
156 
157 void piix_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
158 void piix_setup_channel __P((struct channel_softc*));
159 void piix3_4_setup_channel __P((struct channel_softc*));
160 static u_int32_t piix_setup_idetim_timings __P((u_int8_t, u_int8_t, u_int8_t));
161 static u_int32_t piix_setup_idetim_drvs __P((struct ata_drive_datas*));
162 static u_int32_t piix_setup_sidetim_timings __P((u_int8_t, u_int8_t, u_int8_t));
163 
164 void amd7x6_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
165 void amd7x6_setup_channel __P((struct channel_softc*));
166 
167 void apollo_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
168 void apollo_setup_channel __P((struct channel_softc*));
169 
170 void cmd_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
171 void cmd0643_9_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
172 void cmd0643_9_setup_channel __P((struct channel_softc*));
173 void cmd_channel_map __P((struct pci_attach_args *,
174 			struct pciide_softc *, int));
175 int  cmd_pci_intr __P((void *));
176 void cmd646_9_irqack __P((struct channel_softc *));
177 
178 void cy693_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
179 void cy693_setup_channel __P((struct channel_softc*));
180 
181 void sis_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
182 void sis_setup_channel __P((struct channel_softc*));
183 static int sis_hostbr_match __P(( struct pci_attach_args *));
184 
185 void acer_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
186 void acer_setup_channel __P((struct channel_softc*));
187 int  acer_pci_intr __P((void *));
188 static int acer_isabr_match __P(( struct pci_attach_args *));
189 
190 void pdc202xx_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
191 void pdc202xx_setup_channel __P((struct channel_softc*));
192 int  pdc202xx_pci_intr __P((void *));
193 int  pdc20265_pci_intr __P((void *));
194 
195 void opti_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
196 void opti_setup_channel __P((struct channel_softc*));
197 
198 void hpt_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
199 void hpt_setup_channel __P((struct channel_softc*));
200 int  hpt_pci_intr __P((void *));
201 
202 void acard_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
203 void acard_setup_channel __P((struct channel_softc*));
204 int  acard_pci_intr __P((void *));
205 
206 #ifdef PCIIDE_WINBOND_ENABLE
207 void winbond_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
208 #endif
209 
210 void pciide_channel_dma_setup __P((struct pciide_channel *));
211 int  pciide_dma_table_setup __P((struct pciide_softc*, int, int));
212 int  pciide_dma_init __P((void*, int, int, void *, size_t, int));
213 void pciide_dma_start __P((void*, int, int));
214 int  pciide_dma_finish __P((void*, int, int, int));
215 void pciide_irqack __P((struct channel_softc *));
216 void pciide_print_modes __P((struct pciide_channel *));
217 
218 struct pciide_product_desc {
219 	u_int32_t ide_product;
220 	int ide_flags;
221 	const char *ide_name;
222 	/* map and setup chip, probe drives */
223 	void (*chip_map) __P((struct pciide_softc*, struct pci_attach_args*));
224 };
225 
226 /* Flags for ide_flags */
227 #define IDE_PCI_CLASS_OVERRIDE	0x0001 /* accept even if class != pciide */
228 #define	IDE_16BIT_IOSPACE	0x0002 /* I/O space BARs ignore upper word */
229 
230 /* Default product description for devices not known to this driver */
231 const struct pciide_product_desc default_product_desc = {
232 	0,
233 	0,
234 	"Generic PCI IDE controller",
235 	default_chip_map,
236 };
237 
238 const struct pciide_product_desc pciide_intel_products[] =  {
239 	{ PCI_PRODUCT_INTEL_82092AA,
240 	  0,
241 	  "Intel 82092AA IDE controller",
242 	  default_chip_map,
243 	},
244 	{ PCI_PRODUCT_INTEL_82371FB_IDE,
245 	  0,
246 	  "Intel 82371FB IDE controller (PIIX)",
247 	  piix_chip_map,
248 	},
249 	{ PCI_PRODUCT_INTEL_82371SB_IDE,
250 	  0,
251 	  "Intel 82371SB IDE Interface (PIIX3)",
252 	  piix_chip_map,
253 	},
254 	{ PCI_PRODUCT_INTEL_82371AB_IDE,
255 	  0,
256 	  "Intel 82371AB IDE controller (PIIX4)",
257 	  piix_chip_map,
258 	},
259 	{ PCI_PRODUCT_INTEL_82440MX_IDE,
260 	  0,
261 	  "Intel 82440MX IDE controller",
262 	  piix_chip_map
263 	},
264 	{ PCI_PRODUCT_INTEL_82801AA_IDE,
265 	  0,
266 	  "Intel 82801AA IDE Controller (ICH)",
267 	  piix_chip_map,
268 	},
269 	{ PCI_PRODUCT_INTEL_82801AB_IDE,
270 	  0,
271 	  "Intel 82801AB IDE Controller (ICH0)",
272 	  piix_chip_map,
273 	},
274 	{ PCI_PRODUCT_INTEL_82801BA_IDE,
275 	  0,
276 	  "Intel 82801BA IDE Controller (ICH2)",
277 	  piix_chip_map,
278 	},
279 	{ PCI_PRODUCT_INTEL_82801BAM_IDE,
280 	  0,
281 	  "Intel 82801BAM IDE Controller (ICH2)",
282 	  piix_chip_map,
283 	},
284 	{ 0,
285 	  0,
286 	  NULL,
287 	  NULL
288 	}
289 };
290 
291 const struct pciide_product_desc pciide_amd_products[] =  {
292 	{ PCI_PRODUCT_AMD_PBC756_IDE,
293 	  0,
294 	  "Advanced Micro Devices AMD756 IDE Controller",
295 	  amd7x6_chip_map
296 	},
297 	{ PCI_PRODUCT_AMD_PBC766_IDE,
298 	  0,
299 	  "Advanced Micro Devices AMD766 IDE Controller",
300 	  amd7x6_chip_map
301 	},
302 	{ 0,
303 	  0,
304 	  NULL,
305 	  NULL
306 	}
307 };
308 
309 const struct pciide_product_desc pciide_cmd_products[] =  {
310 	{ PCI_PRODUCT_CMDTECH_640,
311 	  0,
312 	  "CMD Technology PCI0640",
313 	  cmd_chip_map
314 	},
315 	{ PCI_PRODUCT_CMDTECH_643,
316 	  0,
317 	  "CMD Technology PCI0643",
318 	  cmd0643_9_chip_map,
319 	},
320 	{ PCI_PRODUCT_CMDTECH_646,
321 	  0,
322 	  "CMD Technology PCI0646",
323 	  cmd0643_9_chip_map,
324 	},
325 	{ PCI_PRODUCT_CMDTECH_648,
326 	  IDE_PCI_CLASS_OVERRIDE,
327 	  "CMD Technology PCI0648",
328 	  cmd0643_9_chip_map,
329 	},
330 	{ PCI_PRODUCT_CMDTECH_649,
331 	  IDE_PCI_CLASS_OVERRIDE,
332 	  "CMD Technology PCI0649",
333 	  cmd0643_9_chip_map,
334 	},
335 	{ 0,
336 	  0,
337 	  NULL,
338 	  NULL
339 	}
340 };
341 
342 const struct pciide_product_desc pciide_via_products[] =  {
343 	{ PCI_PRODUCT_VIATECH_VT82C586_IDE,
344 	  0,
345 	  NULL,
346 	  apollo_chip_map,
347 	 },
348 	{ PCI_PRODUCT_VIATECH_VT82C586A_IDE,
349 	  0,
350 	  NULL,
351 	  apollo_chip_map,
352 	},
353 	{ 0,
354 	  0,
355 	  NULL,
356 	  NULL
357 	}
358 };
359 
360 const struct pciide_product_desc pciide_cypress_products[] =  {
361 	{ PCI_PRODUCT_CONTAQ_82C693,
362 	  IDE_16BIT_IOSPACE,
363 	  "Cypress 82C693 IDE Controller",
364 	  cy693_chip_map,
365 	},
366 	{ 0,
367 	  0,
368 	  NULL,
369 	  NULL
370 	}
371 };
372 
373 const struct pciide_product_desc pciide_sis_products[] =  {
374 	{ PCI_PRODUCT_SIS_5597_IDE,
375 	  0,
376 	  "Silicon Integrated System 5597/5598 IDE controller",
377 	  sis_chip_map,
378 	},
379 	{ 0,
380 	  0,
381 	  NULL,
382 	  NULL
383 	}
384 };
385 
386 const struct pciide_product_desc pciide_acer_products[] =  {
387 	{ PCI_PRODUCT_ALI_M5229,
388 	  0,
389 	  "Acer Labs M5229 UDMA IDE Controller",
390 	  acer_chip_map,
391 	},
392 	{ 0,
393 	  0,
394 	  NULL,
395 	  NULL
396 	}
397 };
398 
399 const struct pciide_product_desc pciide_promise_products[] =  {
400 	{ PCI_PRODUCT_PROMISE_ULTRA33,
401 	  IDE_PCI_CLASS_OVERRIDE,
402 	  "Promise Ultra33/ATA Bus Master IDE Accelerator",
403 	  pdc202xx_chip_map,
404 	},
405 	{ PCI_PRODUCT_PROMISE_ULTRA66,
406 	  IDE_PCI_CLASS_OVERRIDE,
407 	  "Promise Ultra66/ATA Bus Master IDE Accelerator",
408 	  pdc202xx_chip_map,
409 	},
410 	{ PCI_PRODUCT_PROMISE_ULTRA100,
411 	  IDE_PCI_CLASS_OVERRIDE,
412 	  "Promise Ultra100/ATA Bus Master IDE Accelerator",
413 	  pdc202xx_chip_map,
414 	},
415 	{ PCI_PRODUCT_PROMISE_ULTRA100X,
416 	  IDE_PCI_CLASS_OVERRIDE,
417 	  "Promise Ultra100/ATA Bus Master IDE Accelerator",
418 	  pdc202xx_chip_map,
419 	},
420 	{ 0,
421 	  0,
422 	  NULL,
423 	  NULL
424 	}
425 };
426 
427 const struct pciide_product_desc pciide_opti_products[] =  {
428 	{ PCI_PRODUCT_OPTI_82C621,
429 	  0,
430 	  "OPTi 82c621 PCI IDE controller",
431 	  opti_chip_map,
432 	},
433 	{ PCI_PRODUCT_OPTI_82C568,
434 	  0,
435 	  "OPTi 82c568 (82c621 compatible) PCI IDE controller",
436 	  opti_chip_map,
437 	},
438 	{ PCI_PRODUCT_OPTI_82D568,
439 	  0,
440 	  "OPTi 82d568 (82c621 compatible) PCI IDE controller",
441 	  opti_chip_map,
442 	},
443 	{ 0,
444 	  0,
445 	  NULL,
446 	  NULL
447 	}
448 };
449 
450 const struct pciide_product_desc pciide_triones_products[] =  {
451 	{ PCI_PRODUCT_TRIONES_HPT366,
452 	  IDE_PCI_CLASS_OVERRIDE,
453 	  NULL,
454 	  hpt_chip_map,
455 	},
456 	{ 0,
457 	  0,
458 	  NULL,
459 	  NULL
460 	}
461 };
462 
463 const struct pciide_product_desc pciide_acard_products[] =  {
464 	{ PCI_PRODUCT_ACARD_ATP850U,
465 	  IDE_PCI_CLASS_OVERRIDE,
466 	  "Acard ATP850U Ultra33 IDE Controller",
467 	  acard_chip_map,
468 	},
469 	{ PCI_PRODUCT_ACARD_ATP860,
470 	  IDE_PCI_CLASS_OVERRIDE,
471 	  "Acard ATP860 Ultra66 IDE Controller",
472 	  acard_chip_map,
473 	},
474 	{ PCI_PRODUCT_ACARD_ATP860A,
475 	  IDE_PCI_CLASS_OVERRIDE,
476 	  "Acard ATP860-A Ultra66 IDE Controller",
477 	  acard_chip_map,
478 	},
479 	{ 0,
480 	  0,
481 	  NULL,
482 	  NULL
483 	}
484 };
485 
486 #ifdef PCIIDE_SERVERWORKS_ENABLE
487 const struct pciide_product_desc pciide_serverworks_products[] =  {
488 	{ PCI_PRODUCT_SERVERWORKS_IDE,
489 	  0,
490 	  "ServerWorks ROSB4 IDE Controller",
491 	  piix_chip_map,
492 	},
493 	{ 0,
494 	  0,
495 	  NULL,
496 	}
497 };
498 #endif
499 
500 #ifdef PCIIDE_WINBOND_ENABLE
501 const struct pciide_product_desc pciide_winbond_products[] =  {
502 	{ PCI_PRODUCT_WINBOND_W83C553F_1,
503 	  0,
504 	  "Winbond W83C553F IDE controller",
505 	  winbond_chip_map,
506 	},
507 	{ 0,
508 	  0,
509 	  NULL,
510 	}
511 };
512 #endif
513 
514 struct pciide_vendor_desc {
515 	u_int32_t ide_vendor;
516 	const struct pciide_product_desc *ide_products;
517 };
518 
519 const struct pciide_vendor_desc pciide_vendors[] = {
520 	{ PCI_VENDOR_INTEL, pciide_intel_products },
521 	{ PCI_VENDOR_CMDTECH, pciide_cmd_products },
522 	{ PCI_VENDOR_VIATECH, pciide_via_products },
523 	{ PCI_VENDOR_CONTAQ, pciide_cypress_products },
524 	{ PCI_VENDOR_SIS, pciide_sis_products },
525 	{ PCI_VENDOR_ALI, pciide_acer_products },
526 	{ PCI_VENDOR_PROMISE, pciide_promise_products },
527 	{ PCI_VENDOR_AMD, pciide_amd_products },
528 	{ PCI_VENDOR_OPTI, pciide_opti_products },
529 	{ PCI_VENDOR_TRIONES, pciide_triones_products },
530 	{ PCI_VENDOR_ACARD, pciide_acard_products },
531 #ifdef PCIIDE_SERVERWORKS_ENABLE
532 	{ PCI_VENDOR_SERVERWORKS, pciide_serverworks_products },
533 #endif
534 #ifdef PCIIDE_WINBOND_ENABLE
535 	{ PCI_VENDOR_WINBOND, pciide_winbond_products },
536 #endif
537 	{ 0, NULL }
538 };
539 
540 /* options passed via the 'flags' config keyword */
541 #define PCIIDE_OPTIONS_DMA	0x01
542 
543 int	pciide_match __P((struct device *, struct cfdata *, void *));
544 void	pciide_attach __P((struct device *, struct device *, void *));
545 
546 struct cfattach pciide_ca = {
547 	sizeof(struct pciide_softc), pciide_match, pciide_attach
548 };
549 int	pciide_chipen __P((struct pciide_softc *, struct pci_attach_args *));
550 int	pciide_mapregs_compat __P(( struct pci_attach_args *,
551 	    struct pciide_channel *, int, bus_size_t *, bus_size_t*));
552 int	pciide_mapregs_native __P((struct pci_attach_args *,
553 	    struct pciide_channel *, bus_size_t *, bus_size_t *,
554 	    int (*pci_intr) __P((void *))));
555 void	pciide_mapreg_dma __P((struct pciide_softc *,
556 	    struct pci_attach_args *));
557 int	pciide_chansetup __P((struct pciide_softc *, int, pcireg_t));
558 void	pciide_mapchan __P((struct pci_attach_args *,
559 	    struct pciide_channel *, pcireg_t, bus_size_t *, bus_size_t *,
560 	    int (*pci_intr) __P((void *))));
561 int	pciide_chan_candisable __P((struct pciide_channel *));
562 void	pciide_map_compat_intr __P(( struct pci_attach_args *,
563 	    struct pciide_channel *, int, int));
564 int	pciide_compat_intr __P((void *));
565 int	pciide_pci_intr __P((void *));
566 const struct pciide_product_desc* pciide_lookup_product __P((u_int32_t));
567 
568 const struct pciide_product_desc *
569 pciide_lookup_product(id)
570 	u_int32_t id;
571 {
572 	const struct pciide_product_desc *pp;
573 	const struct pciide_vendor_desc *vp;
574 
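	/*
	 * Two-level lookup: find this vendor's product table (the vendor
	 * list is terminated by a NULL ide_products), then the matching
	 * product entry (the product tables end with a NULL chip_map).
	 */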
575 	for (vp = pciide_vendors; vp->ide_products != NULL; vp++)
576 		if (PCI_VENDOR(id) == vp->ide_vendor)
577 			break;
578 
579 	if ((pp = vp->ide_products) == NULL)
580 		return NULL;
581 
582 	for (; pp->chip_map != NULL; pp++)
583 		if (PCI_PRODUCT(id) == pp->ide_product)
584 			break;
585 
586 	if (pp->chip_map == NULL)
587 		return NULL;
588 	return pp;
589 }
590 
591 int
592 pciide_match(parent, match, aux)
593 	struct device *parent;
594 	struct cfdata *match;
595 	void *aux;
596 {
597 	struct pci_attach_args *pa = aux;
598 	const struct pciide_product_desc *pp;
599 
600 	/*
601 	 * Check the class code to see that it's a PCI IDE controller.
602 	 * If it is, we assume that we can deal with it; it _should_
603 	 * work in a standardized way...
604 	 */
605 	if (PCI_CLASS(pa->pa_class) == PCI_CLASS_MASS_STORAGE &&
606 	    PCI_SUBCLASS(pa->pa_class) == PCI_SUBCLASS_MASS_STORAGE_IDE) {
607 		return (1);
608 	}
609 
610 	/*
611 	 * Some controllers (e.g. the Promise Ultra-33) don't claim to be PCI IDE
612 	 * controllers. Let's see if we can deal with them anyway.
613 	 */
614 	pp = pciide_lookup_product(pa->pa_id);
615 	if (pp && (pp->ide_flags & IDE_PCI_CLASS_OVERRIDE)) {
616 		return (1);
617 	}
618 
619 	return (0);
620 }
621 
622 void
623 pciide_attach(parent, self, aux)
624 	struct device *parent, *self;
625 	void *aux;
626 {
627 	struct pci_attach_args *pa = aux;
628 	pci_chipset_tag_t pc = pa->pa_pc;
629 	pcitag_t tag = pa->pa_tag;
630 	struct pciide_softc *sc = (struct pciide_softc *)self;
631 	pcireg_t csr;
632 	char devinfo[256];
633 	const char *displaydev;
634 
635 	sc->sc_pp = pciide_lookup_product(pa->pa_id);
636 	if (sc->sc_pp == NULL) {
637 		sc->sc_pp = &default_product_desc;
638 		pci_devinfo(pa->pa_id, pa->pa_class, 0, devinfo);
639 		displaydev = devinfo;
640 	} else
641 		displaydev = sc->sc_pp->ide_name;
642 
643 	/* if displaydev == NULL, printf is done in chip-specific map */
644 	if (displaydev)
645 		printf(": %s (rev. 0x%02x)\n", displaydev,
646 		    PCI_REVISION(pa->pa_class));
647 
648 	sc->sc_pc = pa->pa_pc;
649 	sc->sc_tag = pa->pa_tag;
650 #ifdef WDCDEBUG
651 	if (wdcdebug_pciide_mask & DEBUG_PROBE)
652 		pci_conf_print(sc->sc_pc, sc->sc_tag, NULL);
653 #endif
654 	sc->sc_pp->chip_map(sc, pa);
655 
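	/*
	 * The chip-specific map routine sets sc_dma_ok when bus-master DMA
	 * can be used; enable PCI bus mastering in that case.
	 */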
656 	if (sc->sc_dma_ok) {
657 		csr = pci_conf_read(pc, tag, PCI_COMMAND_STATUS_REG);
658 		csr |= PCI_COMMAND_MASTER_ENABLE;
659 		pci_conf_write(pc, tag, PCI_COMMAND_STATUS_REG, csr);
660 	}
661 	WDCDEBUG_PRINT(("pciide: command/status register=%x\n",
662 	    pci_conf_read(pc, tag, PCI_COMMAND_STATUS_REG)), DEBUG_PROBE);
663 }
664 
665 /* tell whether the chip is enabled or not */
666 int
667 pciide_chipen(sc, pa)
668 	struct pciide_softc *sc;
669 	struct pci_attach_args *pa;
670 {
671 	pcireg_t csr;
672 	if ((pa->pa_flags & PCI_FLAGS_IO_ENABLED) == 0) {
673 		csr = pci_conf_read(sc->sc_pc, sc->sc_tag,
674 		    PCI_COMMAND_STATUS_REG);
675 		printf("%s: device disabled (at %s)\n",
676 		    sc->sc_wdcdev.sc_dev.dv_xname,
677 		    (csr & PCI_COMMAND_IO_ENABLE) == 0 ?
678 		    "device" : "bridge");
679 		return 0;
680 	}
681 	return 1;
682 }
683 
684 int
685 pciide_mapregs_compat(pa, cp, compatchan, cmdsizep, ctlsizep)
686 	struct pci_attach_args *pa;
687 	struct pciide_channel *cp;
688 	int compatchan;
689 	bus_size_t *cmdsizep, *ctlsizep;
690 {
691 	struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
692 	struct channel_softc *wdc_cp = &cp->wdc_channel;
693 
694 	cp->compat = 1;
695 	*cmdsizep = PCIIDE_COMPAT_CMD_SIZE;
696 	*ctlsizep = PCIIDE_COMPAT_CTL_SIZE;
697 
698 	wdc_cp->cmd_iot = pa->pa_iot;
699 	if (bus_space_map(wdc_cp->cmd_iot, PCIIDE_COMPAT_CMD_BASE(compatchan),
700 	    PCIIDE_COMPAT_CMD_SIZE, 0, &wdc_cp->cmd_ioh) != 0) {
701 		printf("%s: couldn't map %s channel cmd regs\n",
702 		    sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
703 		return (0);
704 	}
705 
706 	wdc_cp->ctl_iot = pa->pa_iot;
707 	if (bus_space_map(wdc_cp->ctl_iot, PCIIDE_COMPAT_CTL_BASE(compatchan),
708 	    PCIIDE_COMPAT_CTL_SIZE, 0, &wdc_cp->ctl_ioh) != 0) {
709 		printf("%s: couldn't map %s channel ctl regs\n",
710 		    sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
711 		bus_space_unmap(wdc_cp->cmd_iot, wdc_cp->cmd_ioh,
712 		    PCIIDE_COMPAT_CMD_SIZE);
713 		return (0);
714 	}
715 
716 	return (1);
717 }
718 
719 int
720 pciide_mapregs_native(pa, cp, cmdsizep, ctlsizep, pci_intr)
721 	struct pci_attach_args * pa;
722 	struct pciide_channel *cp;
723 	bus_size_t *cmdsizep, *ctlsizep;
724 	int (*pci_intr) __P((void *));
725 {
726 	struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
727 	struct channel_softc *wdc_cp = &cp->wdc_channel;
728 	const char *intrstr;
729 	pci_intr_handle_t intrhandle;
730 
731 	cp->compat = 0;
732 
733 	if (sc->sc_pci_ih == NULL) {
734 		if (pci_intr_map(pa, &intrhandle) != 0) {
735 			printf("%s: couldn't map native-PCI interrupt\n",
736 			    sc->sc_wdcdev.sc_dev.dv_xname);
737 			return 0;
738 		}
739 		intrstr = pci_intr_string(pa->pa_pc, intrhandle);
740 		sc->sc_pci_ih = pci_intr_establish(pa->pa_pc,
741 		    intrhandle, IPL_BIO, pci_intr, sc);
742 		if (sc->sc_pci_ih != NULL) {
743 			printf("%s: using %s for native-PCI interrupt\n",
744 			    sc->sc_wdcdev.sc_dev.dv_xname,
745 			    intrstr ? intrstr : "unknown interrupt");
746 		} else {
747 			printf("%s: couldn't establish native-PCI interrupt",
748 			    sc->sc_wdcdev.sc_dev.dv_xname);
749 			if (intrstr != NULL)
750 				printf(" at %s", intrstr);
751 			printf("\n");
752 			return 0;
753 		}
754 	}
755 	cp->ih = sc->sc_pci_ih;
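	/*
	 * Map the command and control register blocks from the channel's
	 * native-PCI base address registers.
	 */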
756 	if (pci_mapreg_map(pa, PCIIDE_REG_CMD_BASE(wdc_cp->channel),
757 	    PCI_MAPREG_TYPE_IO, 0,
758 	    &wdc_cp->cmd_iot, &wdc_cp->cmd_ioh, NULL, cmdsizep) != 0) {
759 		printf("%s: couldn't map %s channel cmd regs\n",
760 		    sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
761 		return 0;
762 	}
763 
764 	if (pci_mapreg_map(pa, PCIIDE_REG_CTL_BASE(wdc_cp->channel),
765 	    PCI_MAPREG_TYPE_IO, 0,
766 	    &wdc_cp->ctl_iot, &cp->ctl_baseioh, NULL, ctlsizep) != 0) {
767 		printf("%s: couldn't map %s channel ctl regs\n",
768 		    sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
769 		bus_space_unmap(wdc_cp->cmd_iot, wdc_cp->cmd_ioh, *cmdsizep);
770 		return 0;
771 	}
772 	/*
773 	 * In native mode, 4 bytes of I/O space are mapped for the control
774 	 * register; the control register itself is at offset 2. Pass the
775 	 * generic code a handle for only one byte at the right offset.
776 	 */
777 	if (bus_space_subregion(wdc_cp->ctl_iot, cp->ctl_baseioh, 2, 1,
778 	    &wdc_cp->ctl_ioh) != 0) {
779 		printf("%s: unable to subregion %s channel ctl regs\n",
780 		    sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
781 		bus_space_unmap(wdc_cp->cmd_iot, wdc_cp->cmd_ioh, *cmdsizep);
782 		bus_space_unmap(wdc_cp->ctl_iot, cp->ctl_baseioh, *ctlsizep);
783 		return 0;
784 	}
785 	return (1);
786 }
787 
788 void
789 pciide_mapreg_dma(sc, pa)
790 	struct pciide_softc *sc;
791 	struct pci_attach_args *pa;
792 {
793 	pcireg_t maptype;
794 	bus_addr_t addr;
795 
796 	/*
797 	 * Map DMA registers
798 	 *
799 	 * Note that sc_dma_ok is the right variable to test to see if
800 	 * DMA can be done.  If the interface doesn't support DMA,
801 	 * sc_dma_ok will never be non-zero.  If the DMA regs couldn't
802 	 * be mapped, it'll be zero.  I.e., sc_dma_ok will only be
803 	 * non-zero if the interface supports DMA and the registers
804 	 * could be mapped.
805 	 *
806 	 * XXX Note that despite the fact that the Bus Master IDE specs
807 	 * XXX say that "The bus master IDE function uses 16 bytes of IO
808 	 * XXX space," some controllers (at least the United
809 	 * XXX Microelectronics UM8886BF) place it in memory space.
810 	 */
811 	maptype = pci_mapreg_type(pa->pa_pc, pa->pa_tag,
812 	    PCIIDE_REG_BUS_MASTER_DMA);
813 
814 	switch (maptype) {
815 	case PCI_MAPREG_TYPE_IO:
816 		sc->sc_dma_ok = (pci_mapreg_info(pa->pa_pc, pa->pa_tag,
817 		    PCIIDE_REG_BUS_MASTER_DMA, PCI_MAPREG_TYPE_IO,
818 		    &addr, NULL, NULL) == 0);
819 		if (sc->sc_dma_ok == 0) {
820 			printf(", but unused (couldn't query registers)");
821 			break;
822 		}
823 		if ((sc->sc_pp->ide_flags & IDE_16BIT_IOSPACE)
824 		    && addr >= 0x10000) {
825 			sc->sc_dma_ok = 0;
826 			printf(", but unused (registers at unsafe address %#lx)", (unsigned long)addr);
827 			break;
828 		}
829 		/* FALLTHROUGH */
830 
831 	case PCI_MAPREG_MEM_TYPE_32BIT:
832 		sc->sc_dma_ok = (pci_mapreg_map(pa,
833 		    PCIIDE_REG_BUS_MASTER_DMA, maptype, 0,
834 		    &sc->sc_dma_iot, &sc->sc_dma_ioh, NULL, NULL) == 0);
835 		sc->sc_dmat = pa->pa_dmat;
836 		if (sc->sc_dma_ok == 0) {
837 			printf(", but unused (couldn't map registers)");
838 		} else {
839 			sc->sc_wdcdev.dma_arg = sc;
840 			sc->sc_wdcdev.dma_init = pciide_dma_init;
841 			sc->sc_wdcdev.dma_start = pciide_dma_start;
842 			sc->sc_wdcdev.dma_finish = pciide_dma_finish;
843 		}
844 		break;
845 
846 	default:
847 		sc->sc_dma_ok = 0;
848 		printf(", but unsupported register maptype (0x%x)", maptype);
849 	}
850 }
851 
852 int
853 pciide_compat_intr(arg)
854 	void *arg;
855 {
856 	struct pciide_channel *cp = arg;
857 
858 #ifdef DIAGNOSTIC
859 	/* should only be called for a compat channel */
860 	if (cp->compat == 0)
861 		panic("pciide compat intr called for non-compat chan %p\n", cp);
862 #endif
863 	return (wdcintr(&cp->wdc_channel));
864 }
865 
866 int
867 pciide_pci_intr(arg)
868 	void *arg;
869 {
870 	struct pciide_softc *sc = arg;
871 	struct pciide_channel *cp;
872 	struct channel_softc *wdc_cp;
873 	int i, rv, crv;
874 
875 	rv = 0;
876 	for (i = 0; i < sc->sc_wdcdev.nchannels; i++) {
877 		cp = &sc->pciide_channels[i];
878 		wdc_cp = &cp->wdc_channel;
879 
880 		/* If a compat channel, skip. */
881 		if (cp->compat)
882 			continue;
883 		/* if this channel is not waiting for an interrupt, skip */
884 		if ((wdc_cp->ch_flags & WDCF_IRQ_WAIT) == 0)
885 			continue;
886 
887 		crv = wdcintr(wdc_cp);
888 		if (crv == 0)
889 			;		/* leave rv alone */
890 		else if (crv == 1)
891 			rv = 1;		/* claim the intr */
892 		else if (rv == 0)	/* crv should be -1 in this case */
893 			rv = crv;	/* if we've done no better, take it */
894 	}
895 	return (rv);
896 }
897 
898 void
899 pciide_channel_dma_setup(cp)
900 	struct pciide_channel *cp;
901 {
902 	int drive;
903 	struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
904 	struct ata_drive_datas *drvp;
905 
906 	for (drive = 0; drive < 2; drive++) {
907 		drvp = &cp->wdc_channel.ch_drive[drive];
908 		/* If no drive, skip */
909 		if ((drvp->drive_flags & DRIVE) == 0)
910 			continue;
911 		/* setup DMA if needed */
912 		if (((drvp->drive_flags & DRIVE_DMA) == 0 &&
913 		    (drvp->drive_flags & DRIVE_UDMA) == 0) ||
914 		    sc->sc_dma_ok == 0) {
915 			drvp->drive_flags &= ~(DRIVE_DMA | DRIVE_UDMA);
916 			continue;
917 		}
918 		if (pciide_dma_table_setup(sc, cp->wdc_channel.channel, drive)
919 		    != 0) {
920 			/* Abort DMA setup */
921 			drvp->drive_flags &= ~(DRIVE_DMA | DRIVE_UDMA);
922 			continue;
923 		}
924 	}
925 }
926 
927 int
928 pciide_dma_table_setup(sc, channel, drive)
929 	struct pciide_softc *sc;
930 	int channel, drive;
931 {
932 	bus_dma_segment_t seg;
933 	int error, rseg;
934 	const bus_size_t dma_table_size =
935 	    sizeof(struct idedma_table) * NIDEDMA_TABLES;
936 	struct pciide_dma_maps *dma_maps =
937 	    &sc->pciide_channels[channel].dma_maps[drive];
938 
939 	/* If table was already allocated, just return */
940 	if (dma_maps->dma_table)
941 		return 0;
942 
943 	/* Allocate memory for the DMA tables and map it */
944 	if ((error = bus_dmamem_alloc(sc->sc_dmat, dma_table_size,
945 	    IDEDMA_TBL_ALIGN, IDEDMA_TBL_ALIGN, &seg, 1, &rseg,
946 	    BUS_DMA_NOWAIT)) != 0) {
947 		printf("%s:%d: unable to allocate table DMA for "
948 		    "drive %d, error=%d\n", sc->sc_wdcdev.sc_dev.dv_xname,
949 		    channel, drive, error);
950 		return error;
951 	}
952 	if ((error = bus_dmamem_map(sc->sc_dmat, &seg, rseg,
953 	    dma_table_size,
954 	    (caddr_t *)&dma_maps->dma_table,
955 	    BUS_DMA_NOWAIT|BUS_DMA_COHERENT)) != 0) {
956 		printf("%s:%d: unable to map table DMA for "
957 		    "drive %d, error=%d\n", sc->sc_wdcdev.sc_dev.dv_xname,
958 		    channel, drive, error);
959 		return error;
960 	}
961 	WDCDEBUG_PRINT(("pciide_dma_table_setup: table at %p len %lu, "
962 	    "phy 0x%lx\n", dma_maps->dma_table, (u_long)dma_table_size,
963 	    (unsigned long)seg.ds_addr), DEBUG_PROBE);
964 
965 	/* Create and load table DMA map for this disk */
966 	if ((error = bus_dmamap_create(sc->sc_dmat, dma_table_size,
967 	    1, dma_table_size, IDEDMA_TBL_ALIGN, BUS_DMA_NOWAIT,
968 	    &dma_maps->dmamap_table)) != 0) {
969 		printf("%s:%d: unable to create table DMA map for "
970 		    "drive %d, error=%d\n", sc->sc_wdcdev.sc_dev.dv_xname,
971 		    channel, drive, error);
972 		return error;
973 	}
974 	if ((error = bus_dmamap_load(sc->sc_dmat,
975 	    dma_maps->dmamap_table,
976 	    dma_maps->dma_table,
977 	    dma_table_size, NULL, BUS_DMA_NOWAIT)) != 0) {
978 		printf("%s:%d: unable to load table DMA map for "
979 		    "drive %d, error=%d\n", sc->sc_wdcdev.sc_dev.dv_xname,
980 		    channel, drive, error);
981 		return error;
982 	}
983 	WDCDEBUG_PRINT(("pciide_dma_table_setup: phy addr of table 0x%lx\n",
984 	    (unsigned long)dma_maps->dmamap_table->dm_segs[0].ds_addr),
985 	    DEBUG_PROBE);
986 	/* Create a xfer DMA map for this drive */
987 	if ((error = bus_dmamap_create(sc->sc_dmat, IDEDMA_BYTE_COUNT_MAX,
988 	    NIDEDMA_TABLES, IDEDMA_BYTE_COUNT_MAX, IDEDMA_BYTE_COUNT_ALIGN,
989 	    BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW,
990 	    &dma_maps->dmamap_xfer)) != 0) {
991 		printf("%s:%d: unable to create xfer DMA map for "
992 		    "drive %d, error=%d\n", sc->sc_wdcdev.sc_dev.dv_xname,
993 		    channel, drive, error);
994 		return error;
995 	}
996 	return 0;
997 }
998 
999 int
1000 pciide_dma_init(v, channel, drive, databuf, datalen, flags)
1001 	void *v;
1002 	int channel, drive;
1003 	void *databuf;
1004 	size_t datalen;
1005 	int flags;
1006 {
1007 	struct pciide_softc *sc = v;
1008 	int error, seg;
1009 	struct pciide_dma_maps *dma_maps =
1010 	    &sc->pciide_channels[channel].dma_maps[drive];
1011 
1012 	error = bus_dmamap_load(sc->sc_dmat,
1013 	    dma_maps->dmamap_xfer,
1014 	    databuf, datalen, NULL, BUS_DMA_NOWAIT | BUS_DMA_STREAMING |
1015 	    ((flags & WDC_DMA_READ) ? BUS_DMA_READ : BUS_DMA_WRITE));
1016 	if (error) {
1017 		printf("%s:%d: unable to load xfer DMA map for "
1018 		    "drive %d, error=%d\n", sc->sc_wdcdev.sc_dev.dv_xname,
1019 		    channel, drive, error);
1020 		return error;
1021 	}
1022 
1023 	bus_dmamap_sync(sc->sc_dmat, dma_maps->dmamap_xfer, 0,
1024 	    dma_maps->dmamap_xfer->dm_mapsize,
1025 	    (flags & WDC_DMA_READ) ?
1026 	    BUS_DMASYNC_PREREAD : BUS_DMASYNC_PREWRITE);
1027 
1028 	for (seg = 0; seg < dma_maps->dmamap_xfer->dm_nsegs; seg++) {
1029 #ifdef DIAGNOSTIC
1030 		/* A segment must not cross a 64k boundary */
1031 		{
1032 		u_long phys = dma_maps->dmamap_xfer->dm_segs[seg].ds_addr;
1033 		u_long len = dma_maps->dmamap_xfer->dm_segs[seg].ds_len;
1034 		if ((phys & ~IDEDMA_BYTE_COUNT_MASK) !=
1035 		    ((phys + len - 1) & ~IDEDMA_BYTE_COUNT_MASK)) {
1036 			printf("pciide_dma: segment %d physical addr 0x%lx"
1037 			    " len 0x%lx not properly aligned\n",
1038 			    seg, phys, len);
1039 			panic("pciide_dma: buf align");
1040 		}
1041 		}
1042 #endif
1043 		dma_maps->dma_table[seg].base_addr =
1044 		    htole32(dma_maps->dmamap_xfer->dm_segs[seg].ds_addr);
1045 		dma_maps->dma_table[seg].byte_count =
1046 		    htole32(dma_maps->dmamap_xfer->dm_segs[seg].ds_len &
1047 		    IDEDMA_BYTE_COUNT_MASK);
1048 		WDCDEBUG_PRINT(("\t seg %d len %d addr 0x%x\n",
1049 		   seg, le32toh(dma_maps->dma_table[seg].byte_count),
1050 		   le32toh(dma_maps->dma_table[seg].base_addr)), DEBUG_DMA);
1051 
1052 	}
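	/* Mark the last table entry as the end of the transfer list. */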
1053 	dma_maps->dma_table[dma_maps->dmamap_xfer->dm_nsegs -1].byte_count |=
1054 	    htole32(IDEDMA_BYTE_COUNT_EOT);
1055 
1056 	bus_dmamap_sync(sc->sc_dmat, dma_maps->dmamap_table, 0,
1057 	    dma_maps->dmamap_table->dm_mapsize,
1058 	    BUS_DMASYNC_PREWRITE);
1059 
1060 	/* Maps are ready. Start DMA function */
1061 #ifdef DIAGNOSTIC
1062 	if (dma_maps->dmamap_table->dm_segs[0].ds_addr & ~IDEDMA_TBL_MASK) {
1063 		printf("pciide_dma_init: addr 0x%lx not properly aligned\n",
1064 		    (u_long)dma_maps->dmamap_table->dm_segs[0].ds_addr);
1065 		panic("pciide_dma_init: table align");
1066 	}
1067 #endif
1068 
1069 	/* Clear status bits */
1070 	bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
1071 	    IDEDMA_CTL + IDEDMA_SCH_OFFSET * channel,
1072 	    bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh,
1073 		IDEDMA_CTL + IDEDMA_SCH_OFFSET * channel));
1074 	/* Write table addr */
1075 	bus_space_write_4(sc->sc_dma_iot, sc->sc_dma_ioh,
1076 	    IDEDMA_TBL + IDEDMA_SCH_OFFSET * channel,
1077 	    dma_maps->dmamap_table->dm_segs[0].ds_addr);
1078 	/* set read/write */
1079 	bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
1080 	    IDEDMA_CMD + IDEDMA_SCH_OFFSET * channel,
1081 	    (flags & WDC_DMA_READ) ? IDEDMA_CMD_WRITE: 0);
1082 	/* remember flags */
1083 	dma_maps->dma_flags = flags;
1084 	return 0;
1085 }
1086 
1087 void
1088 pciide_dma_start(v, channel, drive)
1089 	void *v;
1090 	int channel, drive;
1091 {
1092 	struct pciide_softc *sc = v;
1093 
1094 	WDCDEBUG_PRINT(("pciide_dma_start\n"),DEBUG_XFERS);
1095 	bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
1096 	    IDEDMA_CMD + IDEDMA_SCH_OFFSET * channel,
1097 	    bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh,
1098 		IDEDMA_CMD + IDEDMA_SCH_OFFSET * channel) | IDEDMA_CMD_START);
1099 }
1100 
1101 int
1102 pciide_dma_finish(v, channel, drive, force)
1103 	void *v;
1104 	int channel, drive;
1105 	int force;
1106 {
1107 	struct pciide_softc *sc = v;
1108 	u_int8_t status;
1109 	int error = 0;
1110 	struct pciide_dma_maps *dma_maps =
1111 	    &sc->pciide_channels[channel].dma_maps[drive];
1112 
1113 	status = bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh,
1114 	    IDEDMA_CTL + IDEDMA_SCH_OFFSET * channel);
1115 	WDCDEBUG_PRINT(("pciide_dma_finish: status 0x%x\n", status),
1116 	    DEBUG_XFERS);
1117 
1118 	if (force == 0 && (status & IDEDMA_CTL_INTR) == 0)
1119 		return WDC_DMAST_NOIRQ;
1120 
1121 	/* stop DMA channel */
1122 	bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
1123 	    IDEDMA_CMD + IDEDMA_SCH_OFFSET * channel,
1124 	    bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh,
1125 		IDEDMA_CMD + IDEDMA_SCH_OFFSET * channel) & ~IDEDMA_CMD_START);
1126 
1127 	/* Unload the map of the data buffer */
1128 	bus_dmamap_sync(sc->sc_dmat, dma_maps->dmamap_xfer, 0,
1129 	    dma_maps->dmamap_xfer->dm_mapsize,
1130 	    (dma_maps->dma_flags & WDC_DMA_READ) ?
1131 	    BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE);
1132 	bus_dmamap_unload(sc->sc_dmat, dma_maps->dmamap_xfer);
1133 
1134 	if ((status & IDEDMA_CTL_ERR) != 0) {
1135 		printf("%s:%d:%d: bus-master DMA error: status=0x%x\n",
1136 		    sc->sc_wdcdev.sc_dev.dv_xname, channel, drive, status);
1137 		error |= WDC_DMAST_ERR;
1138 	}
1139 
1140 	if ((status & IDEDMA_CTL_INTR) == 0) {
1141 		printf("%s:%d:%d: bus-master DMA error: missing interrupt, "
1142 		    "status=0x%x\n", sc->sc_wdcdev.sc_dev.dv_xname, channel,
1143 		    drive, status);
1144 		error |= WDC_DMAST_NOIRQ;
1145 	}
1146 
1147 	if ((status & IDEDMA_CTL_ACT) != 0) {
1148 		/* data underrun, may be a valid condition for ATAPI */
1149 		error |= WDC_DMAST_UNDER;
1150 	}
1151 	return error;
1152 }
1153 
1154 void
1155 pciide_irqack(chp)
1156 	struct channel_softc *chp;
1157 {
1158 	struct pciide_channel *cp = (struct pciide_channel*)chp;
1159 	struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
1160 
1161 	/* clear status bits in IDE DMA registers */
1162 	bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
1163 	    IDEDMA_CTL + IDEDMA_SCH_OFFSET * chp->channel,
1164 	    bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh,
1165 		IDEDMA_CTL + IDEDMA_SCH_OFFSET * chp->channel));
1166 }
1167 
1168 /* some common code used by several chip_map */
1169 int
1170 pciide_chansetup(sc, channel, interface)
1171 	struct pciide_softc *sc;
1172 	int channel;
1173 	pcireg_t interface;
1174 {
1175 	struct pciide_channel *cp = &sc->pciide_channels[channel];
1176 	sc->wdc_chanarray[channel] = &cp->wdc_channel;
1177 	cp->name = PCIIDE_CHANNEL_NAME(channel);
1178 	cp->wdc_channel.channel = channel;
1179 	cp->wdc_channel.wdc = &sc->sc_wdcdev;
1180 	cp->wdc_channel.ch_queue =
1181 	    malloc(sizeof(struct channel_queue), M_DEVBUF, M_NOWAIT);
1182 	if (cp->wdc_channel.ch_queue == NULL) {
1183 		printf("%s: %s channel: "
1184 		    "can't allocate memory for command queue\n",
1185 		    sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
1186 		return 0;
1187 	}
1188 	printf("%s: %s channel %s to %s mode\n",
1189 	    sc->sc_wdcdev.sc_dev.dv_xname, cp->name,
1190 	    (interface & PCIIDE_INTERFACE_SETTABLE(channel)) ?
1191 	    "configured" : "wired",
1192 	    (interface & PCIIDE_INTERFACE_PCI(channel)) ?
1193 	    "native-PCI" : "compatibility");
1194 	return 1;
1195 }
1196 
1197 /* some common code used by several chip_map routines to map a channel */
1198 void
1199 pciide_mapchan(pa, cp, interface, cmdsizep, ctlsizep, pci_intr)
1200 	struct pci_attach_args *pa;
1201 	struct pciide_channel *cp;
1202 	pcireg_t interface;
1203 	bus_size_t *cmdsizep, *ctlsizep;
1204 	int (*pci_intr) __P((void *));
1205 {
1206 	struct channel_softc *wdc_cp = &cp->wdc_channel;
1207 
1208 	if (interface & PCIIDE_INTERFACE_PCI(wdc_cp->channel))
1209 		cp->hw_ok = pciide_mapregs_native(pa, cp, cmdsizep, ctlsizep,
1210 		    pci_intr);
1211 	else
1212 		cp->hw_ok = pciide_mapregs_compat(pa, cp,
1213 		    wdc_cp->channel, cmdsizep, ctlsizep);
1214 
1215 	if (cp->hw_ok == 0)
1216 		return;
1217 	wdc_cp->data32iot = wdc_cp->cmd_iot;
1218 	wdc_cp->data32ioh = wdc_cp->cmd_ioh;
1219 	wdcattach(wdc_cp);
1220 }
1221 
1222 /*
1223  * Generic code to check whether a channel can be disabled. Returns 1
1224  * if the channel can be disabled, 0 if not.
1225  */
1226 int
1227 pciide_chan_candisable(cp)
1228 	struct pciide_channel *cp;
1229 {
1230 	struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
1231 	struct channel_softc *wdc_cp = &cp->wdc_channel;
1232 
1233 	if ((wdc_cp->ch_drive[0].drive_flags & DRIVE) == 0 &&
1234 	    (wdc_cp->ch_drive[1].drive_flags & DRIVE) == 0) {
1235 		printf("%s: disabling %s channel (no drives)\n",
1236 		    sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
1237 		cp->hw_ok = 0;
1238 		return 1;
1239 	}
1240 	return 0;
1241 }
1242 
1243 /*
1244  * Generic code to map the compat interrupt if hw_ok=1 and it is a compat
1245  * channel. Sets hw_ok=0 on failure.
1246  */
1247 void
1248 pciide_map_compat_intr(pa, cp, compatchan, interface)
1249 	struct pci_attach_args *pa;
1250 	struct pciide_channel *cp;
1251 	int compatchan, interface;
1252 {
1253 	struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
1254 	struct channel_softc *wdc_cp = &cp->wdc_channel;
1255 
1256 	if (cp->hw_ok == 0)
1257 		return;
1258 	if ((interface & PCIIDE_INTERFACE_PCI(wdc_cp->channel)) != 0)
1259 		return;
1260 
1261 #ifdef __HAVE_PCIIDE_MACHDEP_COMPAT_INTR_ESTABLISH
1262 	cp->ih = pciide_machdep_compat_intr_establish(&sc->sc_wdcdev.sc_dev,
1263 	    pa, compatchan, pciide_compat_intr, cp);
1264 	if (cp->ih == NULL) {
1265 #endif
1266 		printf("%s: no compatibility interrupt for use by %s "
1267 		    "channel\n", sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
1268 		cp->hw_ok = 0;
1269 #ifdef __HAVE_PCIIDE_MACHDEP_COMPAT_INTR_ESTABLISH
1270 	}
1271 #endif
1272 }
1273 
1274 void
1275 pciide_print_modes(cp)
1276 	struct pciide_channel *cp;
1277 {
1278 	wdc_print_modes(&cp->wdc_channel);
1279 }
1280 
1281 void
1282 default_chip_map(sc, pa)
1283 	struct pciide_softc *sc;
1284 	struct pci_attach_args *pa;
1285 {
1286 	struct pciide_channel *cp;
1287 	pcireg_t interface = PCI_INTERFACE(pa->pa_class);
1288 	pcireg_t csr;
1289 	int channel, drive;
1290 	struct ata_drive_datas *drvp;
1291 	u_int8_t idedma_ctl;
1292 	bus_size_t cmdsize, ctlsize;
1293 	char *failreason;
1294 
1295 	if (pciide_chipen(sc, pa) == 0)
1296 		return;
1297 
1298 	if (interface & PCIIDE_INTERFACE_BUS_MASTER_DMA) {
1299 		printf("%s: bus-master DMA support present",
1300 		    sc->sc_wdcdev.sc_dev.dv_xname);
1301 		if (sc->sc_pp == &default_product_desc &&
1302 		    (sc->sc_wdcdev.sc_dev.dv_cfdata->cf_flags &
1303 		    PCIIDE_OPTIONS_DMA) == 0) {
1304 			printf(", but unused (no driver support)");
1305 			sc->sc_dma_ok = 0;
1306 		} else {
1307 			pciide_mapreg_dma(sc, pa);
1308 			if (sc->sc_dma_ok != 0)
1309 				printf(", used without full driver "
1310 				    "support");
1311 		}
1312 	} else {
1313 		printf("%s: hardware does not support DMA",
1314 		    sc->sc_wdcdev.sc_dev.dv_xname);
1315 		sc->sc_dma_ok = 0;
1316 	}
1317 	printf("\n");
1318 	if (sc->sc_dma_ok) {
1319 		sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_IRQACK;
1320 		sc->sc_wdcdev.irqack = pciide_irqack;
1321 	}
1322 	sc->sc_wdcdev.PIO_cap = 0;
1323 	sc->sc_wdcdev.DMA_cap = 0;
1324 
1325 	sc->sc_wdcdev.channels = sc->wdc_chanarray;
1326 	sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
1327 	sc->sc_wdcdev.cap |= WDC_CAPABILITY_DATA16;
1328 
1329 	for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
1330 		cp = &sc->pciide_channels[channel];
1331 		if (pciide_chansetup(sc, channel, interface) == 0)
1332 			continue;
1333 		if (interface & PCIIDE_INTERFACE_PCI(channel)) {
1334 			cp->hw_ok = pciide_mapregs_native(pa, cp, &cmdsize,
1335 			    &ctlsize, pciide_pci_intr);
1336 		} else {
1337 			cp->hw_ok = pciide_mapregs_compat(pa, cp,
1338 			    channel, &cmdsize, &ctlsize);
1339 		}
1340 		if (cp->hw_ok == 0)
1341 			continue;
1342 		/*
1343 		 * Check to see if something appears to be there.
1344 		 */
1345 		failreason = NULL;
1346 		if (!wdcprobe(&cp->wdc_channel)) {
1347 			failreason = "not responding; disabled or no drives?";
1348 			goto next;
1349 		}
1350 		/*
1351 		 * Now, make sure it's actually attributable to this PCI IDE
1352 		 * channel by trying to access the channel again while the
1353 		 * PCI IDE controller's I/O space is disabled.  (If the
1354 		 * channel no longer appears to be there, it belongs to
1355 		 * this controller.)  YUCK!
1356 		 */
1357 		csr = pci_conf_read(sc->sc_pc, sc->sc_tag,
1358 		    PCI_COMMAND_STATUS_REG);
1359 		pci_conf_write(sc->sc_pc, sc->sc_tag, PCI_COMMAND_STATUS_REG,
1360 		    csr & ~PCI_COMMAND_IO_ENABLE);
1361 		if (wdcprobe(&cp->wdc_channel))
1362 			failreason = "other hardware responding at addresses";
1363 		pci_conf_write(sc->sc_pc, sc->sc_tag,
1364 		    PCI_COMMAND_STATUS_REG, csr);
1365 next:
1366 		if (failreason) {
1367 			printf("%s: %s channel ignored (%s)\n",
1368 			    sc->sc_wdcdev.sc_dev.dv_xname, cp->name,
1369 			    failreason);
1370 			cp->hw_ok = 0;
1371 			bus_space_unmap(cp->wdc_channel.cmd_iot,
1372 			    cp->wdc_channel.cmd_ioh, cmdsize);
1373 			bus_space_unmap(cp->wdc_channel.ctl_iot,
1374 			    cp->wdc_channel.ctl_ioh, ctlsize);
1375 		} else {
1376 			pciide_map_compat_intr(pa, cp, channel, interface);
1377 		}
1378 		if (cp->hw_ok) {
1379 			cp->wdc_channel.data32iot = cp->wdc_channel.cmd_iot;
1380 			cp->wdc_channel.data32ioh = cp->wdc_channel.cmd_ioh;
1381 			wdcattach(&cp->wdc_channel);
1382 		}
1383 	}
1384 
1385 	if (sc->sc_dma_ok == 0)
1386 		return;
1387 
1388 	/* Allocate DMA maps */
1389 	for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
1390 		idedma_ctl = 0;
1391 		cp = &sc->pciide_channels[channel];
1392 		for (drive = 0; drive < 2; drive++) {
1393 			drvp = &cp->wdc_channel.ch_drive[drive];
1394 			/* If no drive, skip */
1395 			if ((drvp->drive_flags & DRIVE) == 0)
1396 				continue;
1397 			if ((drvp->drive_flags & DRIVE_DMA) == 0)
1398 				continue;
1399 			if (pciide_dma_table_setup(sc, channel, drive) != 0) {
1400 				/* Abort DMA setup */
1401 				printf("%s:%d:%d: can't allocate DMA maps, "
1402 				    "using PIO transfers\n",
1403 				    sc->sc_wdcdev.sc_dev.dv_xname,
1404 				    channel, drive);
1405 				drvp->drive_flags &= ~DRIVE_DMA;
				continue;
1406 			}
1407 			printf("%s:%d:%d: using DMA data transfers\n",
1408 			    sc->sc_wdcdev.sc_dev.dv_xname,
1409 			    channel, drive);
1410 			idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
1411 		}
1412 		if (idedma_ctl != 0) {
1413 			/* Add software bits in status register */
1414 			bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
1415 			    IDEDMA_CTL + (IDEDMA_SCH_OFFSET * channel),
1416 			    idedma_ctl);
1417 		}
1418 	}
1419 }
1420 
1421 void
1422 piix_chip_map(sc, pa)
1423 	struct pciide_softc *sc;
1424 	struct pci_attach_args *pa;
1425 {
1426 	struct pciide_channel *cp;
1427 	int channel;
1428 	u_int32_t idetim;
1429 	bus_size_t cmdsize, ctlsize;
1430 
1431 	if (pciide_chipen(sc, pa) == 0)
1432 		return;
1433 
1434 	printf("%s: bus-master DMA support present",
1435 	    sc->sc_wdcdev.sc_dev.dv_xname);
1436 	pciide_mapreg_dma(sc, pa);
1437 	printf("\n");
1438 	sc->sc_wdcdev.cap |= WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
1439 	    WDC_CAPABILITY_MODE;
1440 	if (sc->sc_dma_ok) {
1441 		sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_IRQACK;
1442 		sc->sc_wdcdev.irqack = pciide_irqack;
1443 		switch(sc->sc_pp->ide_product) {
1444 		case PCI_PRODUCT_INTEL_82371AB_IDE:
1445 		case PCI_PRODUCT_INTEL_82440MX_IDE:
1446 		case PCI_PRODUCT_INTEL_82801AA_IDE:
1447 		case PCI_PRODUCT_INTEL_82801AB_IDE:
1448 		case PCI_PRODUCT_INTEL_82801BA_IDE:
1449 		case PCI_PRODUCT_INTEL_82801BAM_IDE:
1450 			sc->sc_wdcdev.cap |= WDC_CAPABILITY_UDMA;
1451 		}
1452 	}
1453 	sc->sc_wdcdev.PIO_cap = 4;
1454 	sc->sc_wdcdev.DMA_cap = 2;
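	/*
	 * Ultra-DMA capability depends on the chip: the 82801AA (ICH) does
	 * UDMA/66, the 82801BA/BAM (ICH2) do UDMA/100, and the other
	 * supported parts do UDMA/33.
	 */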
1455 	switch(sc->sc_pp->ide_product) {
1456 	case PCI_PRODUCT_INTEL_82801AA_IDE:
1457 		sc->sc_wdcdev.UDMA_cap = 4;
1458 		break;
1459 	case PCI_PRODUCT_INTEL_82801BA_IDE:
1460 	case PCI_PRODUCT_INTEL_82801BAM_IDE:
1461 		sc->sc_wdcdev.UDMA_cap = 5;
1462 		break;
1463 	default:
1464 		sc->sc_wdcdev.UDMA_cap = 2;
1465 	}
1466 	if (sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82371FB_IDE)
1467 		sc->sc_wdcdev.set_modes = piix_setup_channel;
1468 	else
1469 		sc->sc_wdcdev.set_modes = piix3_4_setup_channel;
1470 	sc->sc_wdcdev.channels = sc->wdc_chanarray;
1471 	sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
1472 
1473 	WDCDEBUG_PRINT(("piix_setup_chip: old idetim=0x%x",
1474 	    pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_IDETIM)),
1475 	    DEBUG_PROBE);
1476 	if (sc->sc_pp->ide_product != PCI_PRODUCT_INTEL_82371FB_IDE) {
1477 		WDCDEBUG_PRINT((", sidetim=0x%x",
1478 		    pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_SIDETIM)),
1479 		    DEBUG_PROBE);
1480 		if (sc->sc_wdcdev.cap & WDC_CAPABILITY_UDMA) {
1481 			WDCDEBUG_PRINT((", udmareg 0x%x",
1482 			    pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_UDMAREG)),
1483 			    DEBUG_PROBE);
1484 		}
1485 		if (sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801AA_IDE ||
1486 		    sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801AB_IDE ||
1487 		    sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801BA_IDE ||
1488 		    sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801BAM_IDE) {
1489 			WDCDEBUG_PRINT((", IDE_CONTROL 0x%x",
1490 			    pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_CONFIG)),
1491 			    DEBUG_PROBE);
1492 		}
1493 
1494 	}
1495 	WDCDEBUG_PRINT(("\n"), DEBUG_PROBE);
1496 
1497 	for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
1498 		cp = &sc->pciide_channels[channel];
1499 		/* PIIX is compat-only */
1500 		if (pciide_chansetup(sc, channel, 0) == 0)
1501 			continue;
1502 		idetim = pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_IDETIM);
1503 		if ((PIIX_IDETIM_READ(idetim, channel) &
1504 		    PIIX_IDETIM_IDE) == 0) {
1505 			printf("%s: %s channel ignored (disabled)\n",
1506 			    sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
1507 			continue;
1508 		}
1509 		/* PIIX are compat-only pciide devices */
1510 		pciide_mapchan(pa, cp, 0, &cmdsize, &ctlsize, pciide_pci_intr);
1511 		if (cp->hw_ok == 0)
1512 			continue;
1513 		if (pciide_chan_candisable(cp)) {
1514 			idetim = PIIX_IDETIM_CLEAR(idetim, PIIX_IDETIM_IDE,
1515 			    channel);
1516 			pci_conf_write(sc->sc_pc, sc->sc_tag, PIIX_IDETIM,
1517 			    idetim);
1518 		}
1519 		pciide_map_compat_intr(pa, cp, channel, 0);
1520 		if (cp->hw_ok == 0)
1521 			continue;
1522 		sc->sc_wdcdev.set_modes(&cp->wdc_channel);
1523 	}
1524 
1525 	WDCDEBUG_PRINT(("piix_setup_chip: idetim=0x%x",
1526 	    pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_IDETIM)),
1527 	    DEBUG_PROBE);
1528 	if (sc->sc_pp->ide_product != PCI_PRODUCT_INTEL_82371FB_IDE) {
1529 		WDCDEBUG_PRINT((", sidetim=0x%x",
1530 		    pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_SIDETIM)),
1531 		    DEBUG_PROBE);
1532 		if (sc->sc_wdcdev.cap & WDC_CAPABILITY_UDMA) {
1533 			WDCDEBUG_PRINT((", udmareg 0x%x",
1534 			    pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_UDMAREG)),
1535 			    DEBUG_PROBE);
1536 		}
1537 		if (sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801AA_IDE ||
1538 		    sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801AB_IDE ||
1539 		    sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801BA_IDE ||
1540 		    sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801BAM_IDE) {
1541 			WDCDEBUG_PRINT((", IDE_CONTROL 0x%x",
1542 			    pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_CONFIG)),
1543 			    DEBUG_PROBE);
1544 		}
1545 	}
1546 	WDCDEBUG_PRINT(("\n"), DEBUG_PROBE);
1547 }
1548 
1549 void
1550 piix_setup_channel(chp)
1551 	struct channel_softc *chp;
1552 {
1553 	u_int8_t mode[2], drive;
1554 	u_int32_t oidetim, idetim, idedma_ctl;
1555 	struct pciide_channel *cp = (struct pciide_channel*)chp;
1556 	struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
1557 	struct ata_drive_datas *drvp = cp->wdc_channel.ch_drive;
1558 
1559 	oidetim = pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_IDETIM);
1560 	idetim = PIIX_IDETIM_CLEAR(oidetim, 0xffff, chp->channel);
1561 	idedma_ctl = 0;
1562 
1563 	/* set up new idetim: Enable IDE registers decode */
1564 	idetim = PIIX_IDETIM_SET(idetim, PIIX_IDETIM_IDE,
1565 	    chp->channel);
1566 
1567 	/* setup DMA */
1568 	pciide_channel_dma_setup(cp);
1569 
1570 	/*
1571 	 * Here we have to juggle the drive modes: the PIIX can't have
1572 	 * different timings for the master and slave drives, so we need
1573 	 * to find the best combination.
1574 	 */
1575 
1576 	/* If both drives support DMA, take the lower mode */
1577 	if ((drvp[0].drive_flags & DRIVE_DMA) &&
1578 	    (drvp[1].drive_flags & DRIVE_DMA)) {
1579 		mode[0] = mode[1] =
1580 		    min(drvp[0].DMA_mode, drvp[1].DMA_mode);
1581 		drvp[0].DMA_mode = mode[0];
1582 		drvp[1].DMA_mode = mode[1];
1583 		goto ok;
1584 	}
1585 	/*
1586 	 * If only one drive supports DMA, use its mode, and
1587 	 * put the other one in PIO mode 0 if its mode is not compatible.
1588 	 */
1589 	if (drvp[0].drive_flags & DRIVE_DMA) {
1590 		mode[0] = drvp[0].DMA_mode;
1591 		mode[1] = drvp[1].PIO_mode;
1592 		if (piix_isp_pio[mode[1]] != piix_isp_dma[mode[0]] ||
1593 		    piix_rtc_pio[mode[1]] != piix_rtc_dma[mode[0]])
1594 			mode[1] = drvp[1].PIO_mode = 0;
1595 		goto ok;
1596 	}
1597 	if (drvp[1].drive_flags & DRIVE_DMA) {
1598 		mode[1] = drvp[1].DMA_mode;
1599 		mode[0] = drvp[0].PIO_mode;
1600 		if (piix_isp_pio[mode[0]] != piix_isp_dma[mode[1]] ||
1601 		    piix_rtc_pio[mode[0]] != piix_rtc_dma[mode[1]])
1602 			mode[0] = drvp[0].PIO_mode = 0;
1603 		goto ok;
1604 	}
1605 	/*
1606 	 * If neither drive uses DMA, take the lower mode, unless
1607 	 * one of them is in PIO mode < 2.
1608 	 */
1609 	if (drvp[0].PIO_mode < 2) {
1610 		mode[0] = drvp[0].PIO_mode = 0;
1611 		mode[1] = drvp[1].PIO_mode;
1612 	} else if (drvp[1].PIO_mode < 2) {
1613 		mode[1] = drvp[1].PIO_mode = 0;
1614 		mode[0] = drvp[0].PIO_mode;
1615 	} else {
1616 		mode[0] = mode[1] =
1617 		    min(drvp[1].PIO_mode, drvp[0].PIO_mode);
1618 		drvp[0].PIO_mode = mode[0];
1619 		drvp[1].PIO_mode = mode[1];
1620 	}
1621 ok:	/* The modes are setup */
1622 	for (drive = 0; drive < 2; drive++) {
1623 		if (drvp[drive].drive_flags & DRIVE_DMA) {
1624 			idetim |= piix_setup_idetim_timings(
1625 			    mode[drive], 1, chp->channel);
1626 			goto end;
1627 		}
1628 	}
1629 	/* If we get here, none of the drives use DMA */
1630 	if (mode[0] >= 2)
1631 		idetim |= piix_setup_idetim_timings(
1632 		    mode[0], 0, chp->channel);
1633 	else
1634 		idetim |= piix_setup_idetim_timings(
1635 		    mode[1], 0, chp->channel);
1636 end:	/*
1637 	 * The timing mode is now set up in the controller. Enable
1638 	 * it per-drive.
1639 	 */
1640 	for (drive = 0; drive < 2; drive++) {
1641 		/* If no drive, skip */
1642 		if ((drvp[drive].drive_flags & DRIVE) == 0)
1643 			continue;
1644 		idetim |= piix_setup_idetim_drvs(&drvp[drive]);
1645 		if (drvp[drive].drive_flags & DRIVE_DMA)
1646 			idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
1647 	}
1648 	if (idedma_ctl != 0) {
1649 		/* Add software bits in status register */
1650 		bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
1651 		    IDEDMA_CTL + (IDEDMA_SCH_OFFSET * chp->channel),
1652 		    idedma_ctl);
1653 	}
1654 	pci_conf_write(sc->sc_pc, sc->sc_tag, PIIX_IDETIM, idetim);
1655 	pciide_print_modes(cp);
1656 }
1657 
1658 void
1659 piix3_4_setup_channel(chp)
1660 	struct channel_softc *chp;
1661 {
1662 	struct ata_drive_datas *drvp;
1663 	u_int32_t oidetim, idetim, sidetim, udmareg, ideconf, idedma_ctl;
1664 	struct pciide_channel *cp = (struct pciide_channel*)chp;
1665 	struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
1666 	int drive;
1667 	int channel = chp->channel;
1668 
1669 	oidetim = pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_IDETIM);
1670 	sidetim = pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_SIDETIM);
1671 	udmareg = pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_UDMAREG);
1672 	ideconf = pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_CONFIG);
1673 	idetim = PIIX_IDETIM_CLEAR(oidetim, 0xffff, channel);
1674 	sidetim &= ~(PIIX_SIDETIM_ISP_MASK(channel) |
1675 	    PIIX_SIDETIM_RTC_MASK(channel));
1676 
1677 	idedma_ctl = 0;
1678 	/* If channel disabled, no need to go further */
1679 	if ((PIIX_IDETIM_READ(oidetim, channel) & PIIX_IDETIM_IDE) == 0)
1680 		return;
1681 	/* set up new idetim: Enable IDE registers decode */
1682 	idetim = PIIX_IDETIM_SET(idetim, PIIX_IDETIM_IDE, channel);
1683 
1684 	/* setup DMA if needed */
1685 	pciide_channel_dma_setup(cp);
1686 
1687 	for (drive = 0; drive < 2; drive++) {
1688 		udmareg &= ~(PIIX_UDMACTL_DRV_EN(channel, drive) |
1689 		    PIIX_UDMATIM_SET(0x3, channel, drive));
1690 		drvp = &chp->ch_drive[drive];
1691 		/* If no drive, skip */
1692 		if ((drvp->drive_flags & DRIVE) == 0)
1693 			continue;
1694 		if (((drvp->drive_flags & DRIVE_DMA) == 0 &&
1695 		    (drvp->drive_flags & DRIVE_UDMA) == 0))
1696 			goto pio;
1697 
1698 		if (sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801AA_IDE ||
1699 		    sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801AB_IDE ||
1700 		    sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801BA_IDE ||
1701 		    sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801BAM_IDE) {
1702 			ideconf |= PIIX_CONFIG_PINGPONG;
1703 		}
1704 		if (sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801BA_IDE ||
1705 		    sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801BAM_IDE) {
1706 			/* setup Ultra/100 */
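			/*
			 * If PIIX_CONFIG_CR is clear for this drive
			 * (presumably the 80-conductor cable report), cap it
			 * at UDMA 2 (Ultra/33).  Then pick the base-clock
			 * bits to match the final mode: UDMA100 for mode 5,
			 * UDMA66 for modes 3-4, neither for modes <= 2.
			 */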
1707 			if (drvp->UDMA_mode > 2 &&
1708 			    (ideconf & PIIX_CONFIG_CR(channel, drive)) == 0)
1709 				drvp->UDMA_mode = 2;
1710 			if (drvp->UDMA_mode > 4) {
1711 				ideconf |= PIIX_CONFIG_UDMA100(channel, drive);
1712 			} else {
1713 				ideconf &= ~PIIX_CONFIG_UDMA100(channel, drive);
1714 				if (drvp->UDMA_mode > 2) {
1715 					ideconf |= PIIX_CONFIG_UDMA66(channel,
1716 					    drive);
1717 				} else {
1718 					ideconf &= ~PIIX_CONFIG_UDMA66(channel,
1719 					    drive);
1720 				}
1721 			}
1722 		}
1723 		if (sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801AA_IDE) {
1724 			/* setup Ultra/66 */
1725 			if (drvp->UDMA_mode > 2 &&
1726 			    (ideconf & PIIX_CONFIG_CR(channel, drive)) == 0)
1727 				drvp->UDMA_mode = 2;
1728 			if (drvp->UDMA_mode > 2)
1729 				ideconf |= PIIX_CONFIG_UDMA66(channel, drive);
1730 			else
1731 				ideconf &= ~PIIX_CONFIG_UDMA66(channel, drive);
1732 		}
1733 		if ((chp->wdc->cap & WDC_CAPABILITY_UDMA) &&
1734 		    (drvp->drive_flags & DRIVE_UDMA)) {
1735 			/* use Ultra/DMA */
1736 			drvp->drive_flags &= ~DRIVE_DMA;
1737 			udmareg |= PIIX_UDMACTL_DRV_EN(channel, drive);
1738 			udmareg |= PIIX_UDMATIM_SET(
1739 			    piix4_sct_udma[drvp->UDMA_mode], channel, drive);
1740 		} else {
1741 			/* use Multiword DMA */
1742 			drvp->drive_flags &= ~DRIVE_UDMA;
1743 			if (drive == 0) {
1744 				idetim |= piix_setup_idetim_timings(
1745 				    drvp->DMA_mode, 1, channel);
1746 			} else {
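				/*
				 * Drive 1 timings live in the separate
				 * SIDETIM register; SITRE (presumably
				 * "slave IDE timing register enable") must
				 * be set for the controller to use them.
				 */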
1747 				sidetim |= piix_setup_sidetim_timings(
1748 					drvp->DMA_mode, 1, channel);
1749 				idetim = PIIX_IDETIM_SET(idetim,
1750 				    PIIX_IDETIM_SITRE, channel);
1751 			}
1752 		}
1753 		idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
1754 
1755 pio:		/* use PIO mode */
1756 		idetim |= piix_setup_idetim_drvs(drvp);
1757 		if (drive == 0) {
1758 			idetim |= piix_setup_idetim_timings(
1759 			    drvp->PIO_mode, 0, channel);
1760 		} else {
1761 			sidetim |= piix_setup_sidetim_timings(
1762 				drvp->PIO_mode, 0, channel);
1763 			idetim = PIIX_IDETIM_SET(idetim,
1764 			    PIIX_IDETIM_SITRE, channel);
1765 		}
1766 	}
1767 	if (idedma_ctl != 0) {
1768 		/* Add software bits in status register */
1769 		bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
1770 		    IDEDMA_CTL + (IDEDMA_SCH_OFFSET * channel),
1771 		    idedma_ctl);
1772 	}
1773 	pci_conf_write(sc->sc_pc, sc->sc_tag, PIIX_IDETIM, idetim);
1774 	pci_conf_write(sc->sc_pc, sc->sc_tag, PIIX_SIDETIM, sidetim);
1775 	pci_conf_write(sc->sc_pc, sc->sc_tag, PIIX_UDMAREG, udmareg);
1776 	pci_conf_write(sc->sc_pc, sc->sc_tag, PIIX_CONFIG, ideconf);
1777 	pciide_print_modes(cp);
1778 }
1779 
1780 
1781 /* setup ISP and RTC fields, based on mode */
1782 static u_int32_t
1783 piix_setup_idetim_timings(mode, dma, channel)
1784 	u_int8_t mode;
1785 	u_int8_t dma;
1786 	u_int8_t channel;
1787 {
1788 
1789 	if (dma)
1790 		return PIIX_IDETIM_SET(0,
1791 		    PIIX_IDETIM_ISP_SET(piix_isp_dma[mode]) |
1792 		    PIIX_IDETIM_RTC_SET(piix_rtc_dma[mode]),
1793 		    channel);
1794 	else
1795 		return PIIX_IDETIM_SET(0,
1796 		    PIIX_IDETIM_ISP_SET(piix_isp_pio[mode]) |
1797 		    PIIX_IDETIM_RTC_SET(piix_rtc_pio[mode]),
1798 		    channel);
1799 }
1800 
1801 /* setup DTE, PPE, IE and TIME field based on PIO mode */
1802 static u_int32_t
1803 piix_setup_idetim_drvs(drvp)
1804 	struct ata_drive_datas *drvp;
1805 {
1806 	u_int32_t ret = 0;
1807 	struct channel_softc *chp = drvp->chnl_softc;
1808 	u_int8_t channel = chp->channel;
1809 	u_int8_t drive = drvp->drive;
1810 
1811 	/*
1812 	 * If the drive is using UDMA, the timing setups are independent,
1813 	 * so just check DMA and PIO here.
1814 	 */
1815 	if (drvp->drive_flags & DRIVE_DMA) {
1816 		/* if the drive is in DMA mode 0, use compatible timings */
1817 		if ((drvp->drive_flags & DRIVE_DMA) &&
1818 		    drvp->DMA_mode == 0) {
1819 			drvp->PIO_mode = 0;
1820 			return ret;
1821 		}
1822 		ret = PIIX_IDETIM_SET(ret, PIIX_IDETIM_TIME(drive), channel);
1823 		/*
1824 		 * If PIO and DMA timings are the same, use fast timings for
1825 		 * PIO too, else use compat timings.
1826 		 */
1827 		if ((piix_isp_pio[drvp->PIO_mode] !=
1828 		    piix_isp_dma[drvp->DMA_mode]) ||
1829 		    (piix_rtc_pio[drvp->PIO_mode] !=
1830 		    piix_rtc_dma[drvp->DMA_mode]))
1831 			drvp->PIO_mode = 0;
1832 		/* if PIO mode <= 2, use compat timings for PIO */
1833 		if (drvp->PIO_mode <= 2) {
1834 			ret = PIIX_IDETIM_SET(ret, PIIX_IDETIM_DTE(drive),
1835 			    channel);
1836 			return ret;
1837 		}
1838 	}
1839 
1840 	/*
1841 	 * Now set up PIO modes. If mode < 2, use compat timings.
1842 	 * Else enable fast timings. Enable IORDY and prefetch/post
1843 	 * if PIO mode >= 3.
1844 	 */
1845 
1846 	if (drvp->PIO_mode < 2)
1847 		return ret;
1848 
1849 	ret = PIIX_IDETIM_SET(ret, PIIX_IDETIM_TIME(drive), channel);
1850 	if (drvp->PIO_mode >= 3) {
1851 		ret = PIIX_IDETIM_SET(ret, PIIX_IDETIM_IE(drive), channel);
1852 		ret = PIIX_IDETIM_SET(ret, PIIX_IDETIM_PPE(drive), channel);
1853 	}
1854 	return ret;
1855 }
1856 
1857 /* setup values in SIDETIM registers, based on mode */
1858 static u_int32_t
1859 piix_setup_sidetim_timings(mode, dma, channel)
1860 	u_int8_t mode;
1861 	u_int8_t dma;
1862 	u_int8_t channel;
1863 {
1864 	if (dma)
1865 		return PIIX_SIDETIM_ISP_SET(piix_isp_dma[mode], channel) |
1866 		    PIIX_SIDETIM_RTC_SET(piix_rtc_dma[mode], channel);
1867 	else
1868 		return PIIX_SIDETIM_ISP_SET(piix_isp_pio[mode], channel) |
1869 		    PIIX_SIDETIM_RTC_SET(piix_rtc_pio[mode], channel);
1870 }
1871 
1872 void
1873 amd7x6_chip_map(sc, pa)
1874 	struct pciide_softc *sc;
1875 	struct pci_attach_args *pa;
1876 {
1877 	struct pciide_channel *cp;
1878 	pcireg_t interface = PCI_INTERFACE(pa->pa_class);
1879 	int channel;
1880 	pcireg_t chanenable;
1881 	bus_size_t cmdsize, ctlsize;
1882 
1883 	if (pciide_chipen(sc, pa) == 0)
1884 		return;
1885 	printf("%s: bus-master DMA support present",
1886 	    sc->sc_wdcdev.sc_dev.dv_xname);
1887 	pciide_mapreg_dma(sc, pa);
1888 	printf("\n");
1889 	sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
1890 	    WDC_CAPABILITY_MODE;
1891 	if (sc->sc_dma_ok) {
1892 		sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_UDMA;
1893 		sc->sc_wdcdev.cap |= WDC_CAPABILITY_IRQACK;
1894 		sc->sc_wdcdev.irqack = pciide_irqack;
1895 	}
1896 	sc->sc_wdcdev.PIO_cap = 4;
1897 	sc->sc_wdcdev.DMA_cap = 2;
1898 
1899 	if (sc->sc_pp->ide_product == PCI_PRODUCT_AMD_PBC766_IDE)
1900 		sc->sc_wdcdev.UDMA_cap = 5;
1901 	else
1902 		sc->sc_wdcdev.UDMA_cap = 4;
1903 	sc->sc_wdcdev.set_modes = amd7x6_setup_channel;
1904 	sc->sc_wdcdev.channels = sc->wdc_chanarray;
1905 	sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
1906 	chanenable = pci_conf_read(sc->sc_pc, sc->sc_tag, AMD7X6_CHANSTATUS_EN);
1907 
1908 	WDCDEBUG_PRINT(("amd7x6_chip_map: Channel enable=0x%x\n", chanenable),
1909 	    DEBUG_PROBE);
1910 	for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
1911 		cp = &sc->pciide_channels[channel];
1912 		if (pciide_chansetup(sc, channel, interface) == 0)
1913 			continue;
1914 
1915 		if ((chanenable & AMD7X6_CHAN_EN(channel)) == 0) {
1916 			printf("%s: %s channel ignored (disabled)\n",
1917 			    sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
1918 			continue;
1919 		}
1920 		pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize,
1921 		    pciide_pci_intr);
1922 
1923 		if (pciide_chan_candisable(cp))
1924 			chanenable &= ~AMD7X6_CHAN_EN(channel);
1925 		pciide_map_compat_intr(pa, cp, channel, interface);
1926 		if (cp->hw_ok == 0)
1927 			continue;
1928 
1929 		amd7x6_setup_channel(&cp->wdc_channel);
1930 	}
1931 	pci_conf_write(sc->sc_pc, sc->sc_tag, AMD7X6_CHANSTATUS_EN,
1932 	    chanenable);
1933 	return;
1934 }
1935 
1936 void
1937 amd7x6_setup_channel(chp)
1938 	struct channel_softc *chp;
1939 {
1940 	u_int32_t udmatim_reg, datatim_reg;
1941 	u_int8_t idedma_ctl;
1942 	int mode, drive;
1943 	struct ata_drive_datas *drvp;
1944 	struct pciide_channel *cp = (struct pciide_channel*)chp;
1945 	struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
1946 #ifndef PCIIDE_AMD756_ENABLEDMA
1947 	int rev = PCI_REVISION(
1948 	    pci_conf_read(sc->sc_pc, sc->sc_tag, PCI_CLASS_REG));
1949 #endif
1950 
1951 	idedma_ctl = 0;
1952 	datatim_reg = pci_conf_read(sc->sc_pc, sc->sc_tag, AMD7X6_DATATIM);
1953 	udmatim_reg = pci_conf_read(sc->sc_pc, sc->sc_tag, AMD7X6_UDMA);
1954 	datatim_reg &= ~AMD7X6_DATATIM_MASK(chp->channel);
1955 	udmatim_reg &= ~AMD7X6_UDMA_MASK(chp->channel);
1956 
1957 	/* setup DMA if needed */
1958 	pciide_channel_dma_setup(cp);
1959 
1960 	for (drive = 0; drive < 2; drive++) {
1961 		drvp = &chp->ch_drive[drive];
1962 		/* If no drive, skip */
1963 		if ((drvp->drive_flags & DRIVE) == 0)
1964 			continue;
1965 		/* add timing values, setup DMA if needed */
1966 		if (((drvp->drive_flags & DRIVE_DMA) == 0 &&
1967 		    (drvp->drive_flags & DRIVE_UDMA) == 0)) {
1968 			mode = drvp->PIO_mode;
1969 			goto pio;
1970 		}
1971 		if ((chp->wdc->cap & WDC_CAPABILITY_UDMA) &&
1972 		    (drvp->drive_flags & DRIVE_UDMA)) {
1973 			/* use Ultra/DMA */
1974 			drvp->drive_flags &= ~DRIVE_DMA;
1975 			udmatim_reg |= AMD7X6_UDMA_EN(chp->channel, drive) |
1976 			    AMD7X6_UDMA_EN_MTH(chp->channel, drive) |
1977 			    AMD7X6_UDMA_TIME(chp->channel, drive,
1978 				amd7x6_udma_tim[drvp->UDMA_mode]);
1979 			/* can use PIO timings, MW DMA unused */
1980 			mode = drvp->PIO_mode;
1981 		} else {
1982 			/* use Multiword DMA, but only if revision is OK */
1983 			drvp->drive_flags &= ~DRIVE_UDMA;
1984 #ifndef PCIIDE_AMD756_ENABLEDMA
1985 			/*
1986 			 * The workaround doesn't seem to be necessary
1987 			 * with all drives, so it can be disabled by defining
1988 			 * PCIIDE_AMD756_ENABLEDMA. The bug causes a hard hang
1989 			 * if triggered.
1990 			 */
1991 			if (sc->sc_pp->ide_product ==
1992 			      PCI_PRODUCT_AMD_PBC756_IDE &&
1993 			    AMD756_CHIPREV_DISABLEDMA(rev)) {
1994 				printf("%s:%d:%d: multi-word DMA disabled due "
1995 				    "to chip revision\n",
1996 				    sc->sc_wdcdev.sc_dev.dv_xname,
1997 				    chp->channel, drive);
1998 				mode = drvp->PIO_mode;
1999 				drvp->drive_flags &= ~DRIVE_DMA;
2000 				goto pio;
2001 			}
2002 #endif
2003 			/* mode = min(pio, dma+2) */
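			/*
			 * e.g. PIO 4 with MWDMA 2 gives mode 4; PIO 4 with
			 * MWDMA 1 gives mode 3.
			 */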
2004 			if (drvp->PIO_mode <= (drvp->DMA_mode + 2))
2005 				mode = drvp->PIO_mode;
2006 			else
2007 				mode = drvp->DMA_mode + 2;
2008 		}
2009 		idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
2010 
2011 pio:		/* setup PIO mode */
2012 		if (mode <= 2) {
2013 			drvp->DMA_mode = 0;
2014 			drvp->PIO_mode = 0;
2015 			mode = 0;
2016 		} else {
2017 			drvp->PIO_mode = mode;
2018 			drvp->DMA_mode = mode - 2;
2019 		}
2020 		datatim_reg |=
2021 		    AMD7X6_DATATIM_PULSE(chp->channel, drive,
2022 			amd7x6_pio_set[mode]) |
2023 		    AMD7X6_DATATIM_RECOV(chp->channel, drive,
2024 			amd7x6_pio_rec[mode]);
2025 	}
2026 	if (idedma_ctl != 0) {
2027 		/* Add software bits in status register */
2028 		bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
2029 		    IDEDMA_CTL + (IDEDMA_SCH_OFFSET * chp->channel),
2030 		    idedma_ctl);
2031 	}
2032 	pciide_print_modes(cp);
2033 	pci_conf_write(sc->sc_pc, sc->sc_tag, AMD7X6_DATATIM, datatim_reg);
2034 	pci_conf_write(sc->sc_pc, sc->sc_tag, AMD7X6_UDMA, udmatim_reg);
2035 }
2036 
2037 void
2038 apollo_chip_map(sc, pa)
2039 	struct pciide_softc *sc;
2040 	struct pci_attach_args *pa;
2041 {
2042 	struct pciide_channel *cp;
2043 	pcireg_t interface = PCI_INTERFACE(pa->pa_class);
2044 	int channel;
2045 	u_int32_t ideconf;
2046 	bus_size_t cmdsize, ctlsize;
2047 	pcitag_t pcib_tag;
2048 	pcireg_t pcib_id, pcib_class;
2049 
2050 	if (pciide_chipen(sc, pa) == 0)
2051 		return;
2052 	/* get a PCI tag for the ISA bridge (function 0 of the same device) */
2053 	pcib_tag = pci_make_tag(pa->pa_pc, pa->pa_bus, pa->pa_device, 0);
2054 	/* and read ID and rev of the ISA bridge */
2055 	pcib_id = pci_conf_read(sc->sc_pc, pcib_tag, PCI_ID_REG);
2056 	pcib_class = pci_conf_read(sc->sc_pc, pcib_tag, PCI_CLASS_REG);
2057 	printf(": VIA Technologies ");
2058 	switch (PCI_PRODUCT(pcib_id)) {
2059 	case PCI_PRODUCT_VIATECH_VT82C586_ISA:
2060 		printf("VT82C586 (Apollo VP) ");
2061 		if (PCI_REVISION(pcib_class) >= 0x02) {
2062 			printf("ATA33 controller\n");
2063 			sc->sc_wdcdev.UDMA_cap = 2;
2064 		} else {
2065 			printf("controller\n");
2066 			sc->sc_wdcdev.UDMA_cap = 0;
2067 		}
2068 		break;
2069 	case PCI_PRODUCT_VIATECH_VT82C596A:
2070 		printf("VT82C596A (Apollo Pro) ");
2071 		if (PCI_REVISION(pcib_class) >= 0x12) {
2072 			printf("ATA66 controller\n");
2073 			sc->sc_wdcdev.UDMA_cap = 4;
2074 		} else {
2075 			printf("ATA33 controller\n");
2076 			sc->sc_wdcdev.UDMA_cap = 2;
2077 		}
2078 		break;
2079 	case PCI_PRODUCT_VIATECH_VT82C686A_ISA:
2080 		printf("VT82C686A (Apollo KX133) ");
2081 		if (PCI_REVISION(pcib_class) >= 0x40) {
2082 			printf("ATA100 controller\n");
2083 			sc->sc_wdcdev.UDMA_cap = 5;
2084 		} else {
2085 			printf("ATA66 controller\n");
2086 			sc->sc_wdcdev.UDMA_cap = 4;
2087 		}
2088 		break;
2089 	default:
2090 		printf("unknown ATA controller\n");
2091 		sc->sc_wdcdev.UDMA_cap = 0;
2092 	}
2093 
2094 	printf("%s: bus-master DMA support present",
2095 	    sc->sc_wdcdev.sc_dev.dv_xname);
2096 	pciide_mapreg_dma(sc, pa);
2097 	printf("\n");
2098 	sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
2099 	    WDC_CAPABILITY_MODE;
2100 	if (sc->sc_dma_ok) {
2101 		sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_IRQACK;
2102 		sc->sc_wdcdev.irqack = pciide_irqack;
2103 		if (sc->sc_wdcdev.UDMA_cap > 0)
2104 			sc->sc_wdcdev.cap |= WDC_CAPABILITY_UDMA;
2105 	}
2106 	sc->sc_wdcdev.PIO_cap = 4;
2107 	sc->sc_wdcdev.DMA_cap = 2;
2108 	sc->sc_wdcdev.set_modes = apollo_setup_channel;
2109 	sc->sc_wdcdev.channels = sc->wdc_chanarray;
2110 	sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
2111 
2112 	WDCDEBUG_PRINT(("apollo_chip_map: old APO_IDECONF=0x%x, "
2113 	    "APO_CTLMISC=0x%x, APO_DATATIM=0x%x, APO_UDMA=0x%x\n",
2114 	    pci_conf_read(sc->sc_pc, sc->sc_tag, APO_IDECONF),
2115 	    pci_conf_read(sc->sc_pc, sc->sc_tag, APO_CTLMISC),
2116 	    pci_conf_read(sc->sc_pc, sc->sc_tag, APO_DATATIM),
2117 	    pci_conf_read(sc->sc_pc, sc->sc_tag, APO_UDMA)),
2118 	    DEBUG_PROBE);
2119 
2120 	for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
2121 		cp = &sc->pciide_channels[channel];
2122 		if (pciide_chansetup(sc, channel, interface) == 0)
2123 			continue;
2124 
2125 		ideconf = pci_conf_read(sc->sc_pc, sc->sc_tag, APO_IDECONF);
2126 		if ((ideconf & APO_IDECONF_EN(channel)) == 0) {
2127 			printf("%s: %s channel ignored (disabled)\n",
2128 			    sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
2129 			continue;
2130 		}
2131 		pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize,
2132 		    pciide_pci_intr);
2133 		if (cp->hw_ok == 0)
2134 			continue;
2135 		if (pciide_chan_candisable(cp)) {
2136 			ideconf &= ~APO_IDECONF_EN(channel);
2137 			pci_conf_write(sc->sc_pc, sc->sc_tag, APO_IDECONF,
2138 			    ideconf);
2139 		}
2140 		pciide_map_compat_intr(pa, cp, channel, interface);
2141 
2142 		if (cp->hw_ok == 0)
2143 			continue;
2144 		apollo_setup_channel(&sc->pciide_channels[channel].wdc_channel);
2145 	}
2146 	WDCDEBUG_PRINT(("apollo_chip_map: APO_DATATIM=0x%x, APO_UDMA=0x%x\n",
2147 	    pci_conf_read(sc->sc_pc, sc->sc_tag, APO_DATATIM),
2148 	    pci_conf_read(sc->sc_pc, sc->sc_tag, APO_UDMA)), DEBUG_PROBE);
2149 }
2150 
2151 void
2152 apollo_setup_channel(chp)
2153 	struct channel_softc *chp;
2154 {
2155 	u_int32_t udmatim_reg, datatim_reg;
2156 	u_int8_t idedma_ctl;
2157 	int mode, drive;
2158 	struct ata_drive_datas *drvp;
2159 	struct pciide_channel *cp = (struct pciide_channel*)chp;
2160 	struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
2161 
2162 	idedma_ctl = 0;
2163 	datatim_reg = pci_conf_read(sc->sc_pc, sc->sc_tag, APO_DATATIM);
2164 	udmatim_reg = pci_conf_read(sc->sc_pc, sc->sc_tag, APO_UDMA);
2165 	datatim_reg &= ~APO_DATATIM_MASK(chp->channel);
2166 	udmatim_reg &= ~APO_UDMA_MASK(chp->channel);
2167 
2168 	/* setup DMA if needed */
2169 	pciide_channel_dma_setup(cp);
2170 
2171 	for (drive = 0; drive < 2; drive++) {
2172 		drvp = &chp->ch_drive[drive];
2173 		/* If no drive, skip */
2174 		if ((drvp->drive_flags & DRIVE) == 0)
2175 			continue;
2176 		/* add timing values, setup DMA if needed */
2177 		if (((drvp->drive_flags & DRIVE_DMA) == 0 &&
2178 		    (drvp->drive_flags & DRIVE_UDMA) == 0)) {
2179 			mode = drvp->PIO_mode;
2180 			goto pio;
2181 		}
2182 		if ((chp->wdc->cap & WDC_CAPABILITY_UDMA) &&
2183 		    (drvp->drive_flags & DRIVE_UDMA)) {
2184 			/* use Ultra/DMA */
2185 			drvp->drive_flags &= ~DRIVE_DMA;
2186 			udmatim_reg |= APO_UDMA_EN(chp->channel, drive) |
2187 			    APO_UDMA_EN_MTH(chp->channel, drive);
2188 			if (sc->sc_wdcdev.UDMA_cap == 5) {
2189 				/* 686b */
2190 				udmatim_reg |= APO_UDMA_CLK66(chp->channel);
2191 				udmatim_reg |= APO_UDMA_TIME(chp->channel,
2192 				    drive, apollo_udma100_tim[drvp->UDMA_mode]);
2193 			} else if (sc->sc_wdcdev.UDMA_cap == 4) {
2194 				/* 596b or 686a */
2195 				udmatim_reg |= APO_UDMA_CLK66(chp->channel);
2196 				udmatim_reg |= APO_UDMA_TIME(chp->channel,
2197 				    drive, apollo_udma66_tim[drvp->UDMA_mode]);
2198 			} else {
2199 				/* 596a or 586b */
2200 				udmatim_reg |= APO_UDMA_TIME(chp->channel,
2201 				    drive, apollo_udma33_tim[drvp->UDMA_mode]);
2202 			}
2203 			/* can use PIO timings, MW DMA unused */
2204 			mode = drvp->PIO_mode;
2205 		} else {
2206 			/* use Multiword DMA */
2207 			drvp->drive_flags &= ~DRIVE_UDMA;
2208 			/* mode = min(pio, dma+2) */
2209 			if (drvp->PIO_mode <= (drvp->DMA_mode + 2))
2210 				mode = drvp->PIO_mode;
2211 			else
2212 				mode = drvp->DMA_mode + 2;
2213 		}
2214 		idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
2215 
2216 pio:		/* setup PIO mode */
2217 		if (mode <= 2) {
2218 			drvp->DMA_mode = 0;
2219 			drvp->PIO_mode = 0;
2220 			mode = 0;
2221 		} else {
2222 			drvp->PIO_mode = mode;
2223 			drvp->DMA_mode = mode - 2;
2224 		}
2225 		datatim_reg |=
2226 		    APO_DATATIM_PULSE(chp->channel, drive,
2227 			apollo_pio_set[mode]) |
2228 		    APO_DATATIM_RECOV(chp->channel, drive,
2229 			apollo_pio_rec[mode]);
2230 	}
2231 	if (idedma_ctl != 0) {
2232 		/* Add software bits in status register */
2233 		bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
2234 		    IDEDMA_CTL + (IDEDMA_SCH_OFFSET * chp->channel),
2235 		    idedma_ctl);
2236 	}
2237 	pciide_print_modes(cp);
2238 	pci_conf_write(sc->sc_pc, sc->sc_tag, APO_DATATIM, datatim_reg);
2239 	pci_conf_write(sc->sc_pc, sc->sc_tag, APO_UDMA, udmatim_reg);
2240 }
2241 
2242 void
2243 cmd_channel_map(pa, sc, channel)
2244 	struct pci_attach_args *pa;
2245 	struct pciide_softc *sc;
2246 	int channel;
2247 {
2248 	struct pciide_channel *cp = &sc->pciide_channels[channel];
2249 	bus_size_t cmdsize, ctlsize;
2250 	u_int8_t ctrl = pciide_pci_read(sc->sc_pc, sc->sc_tag, CMD_CTRL);
2251 	int interface;
2252 
2253 	/*
2254 	 * The 0648/0649 can be told to identify as a RAID controller.
2255 	 * In this case, we have to fake the interface value.
2256 	 */
2257 	if (PCI_SUBCLASS(pa->pa_class) != PCI_SUBCLASS_MASS_STORAGE_IDE) {
2258 		interface = PCIIDE_INTERFACE_SETTABLE(0) |
2259 		    PCIIDE_INTERFACE_SETTABLE(1);
2260 		if (pciide_pci_read(pa->pa_pc, pa->pa_tag, CMD_CONF) &
2261 		    CMD_CONF_DSA1)
2262 			interface |= PCIIDE_INTERFACE_PCI(0) |
2263 			    PCIIDE_INTERFACE_PCI(1);
2264 	} else {
2265 		interface = PCI_INTERFACE(pa->pa_class);
2266 	}
2267 
2268 	sc->wdc_chanarray[channel] = &cp->wdc_channel;
2269 	cp->name = PCIIDE_CHANNEL_NAME(channel);
2270 	cp->wdc_channel.channel = channel;
2271 	cp->wdc_channel.wdc = &sc->sc_wdcdev;
2272 
2273 	if (channel > 0) {
2274 		cp->wdc_channel.ch_queue =
2275 		    sc->pciide_channels[0].wdc_channel.ch_queue;
2276 	} else {
2277 		cp->wdc_channel.ch_queue =
2278 		    malloc(sizeof(struct channel_queue), M_DEVBUF, M_NOWAIT);
2279 	}
2280 	if (cp->wdc_channel.ch_queue == NULL) {
2281 		printf("%s %s channel: "
2282 		    "can't allocate memory for command queue\n",
2283 		    sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
2284 		return;
2285 	}
2286 
2287 	printf("%s: %s channel %s to %s mode\n",
2288 	    sc->sc_wdcdev.sc_dev.dv_xname, cp->name,
2289 	    (interface & PCIIDE_INTERFACE_SETTABLE(channel)) ?
2290 	    "configured" : "wired",
2291 	    (interface & PCIIDE_INTERFACE_PCI(channel)) ?
2292 	    "native-PCI" : "compatibility");
2293 
2294 	/*
2295 	 * With a CMD PCI064x, if we get here, the first channel is enabled:
2296 	 * there's no way to disable the first channel without disabling
2297 	 * the whole device.
2298 	 */
2299 	if (channel != 0 && (ctrl & CMD_CTRL_2PORT) == 0) {
2300 		printf("%s: %s channel ignored (disabled)\n",
2301 		    sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
2302 		return;
2303 	}
2304 
2305 	pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize, cmd_pci_intr);
2306 	if (cp->hw_ok == 0)
2307 		return;
2308 	if (channel == 1) {
2309 		if (pciide_chan_candisable(cp)) {
2310 			ctrl &= ~CMD_CTRL_2PORT;
2311 			pciide_pci_write(pa->pa_pc, pa->pa_tag,
2312 			    CMD_CTRL, ctrl);
2313 		}
2314 	}
2315 	pciide_map_compat_intr(pa, cp, channel, interface);
2316 }
2317 
2318 int
2319 cmd_pci_intr(arg)
2320 	void *arg;
2321 {
2322 	struct pciide_softc *sc = arg;
2323 	struct pciide_channel *cp;
2324 	struct channel_softc *wdc_cp;
2325 	int i, rv, crv;
2326 	u_int32_t priirq, secirq;
2327 
2328 	rv = 0;
2329 	priirq = pciide_pci_read(sc->sc_pc, sc->sc_tag, CMD_CONF);
2330 	secirq = pciide_pci_read(sc->sc_pc, sc->sc_tag, CMD_ARTTIM23);
2331 	for (i = 0; i < sc->sc_wdcdev.nchannels; i++) {
2332 		cp = &sc->pciide_channels[i];
2333 		wdc_cp = &cp->wdc_channel;
2334 		/* If a compat channel, skip. */
2335 		if (cp->compat)
2336 			continue;
2337 		if ((i == 0 && (priirq & CMD_CONF_DRV0_INTR)) ||
2338 		    (i == 1 && (secirq & CMD_ARTTIM23_IRQ))) {
2339 			crv = wdcintr(wdc_cp);
2340 			if (crv == 0)
2341 				printf("%s:%d: bogus intr\n",
2342 				    sc->sc_wdcdev.sc_dev.dv_xname, i);
2343 			else
2344 				rv = 1;
2345 		}
2346 	}
2347 	return rv;
2348 }
2349 
2350 void
2351 cmd_chip_map(sc, pa)
2352 	struct pciide_softc *sc;
2353 	struct pci_attach_args *pa;
2354 {
2355 	int channel;
2356 
2357 	/*
2358 	 * For a CMD PCI064x, the use of PCI_COMMAND_IO_ENABLE
2359 	 * and the base address registers can be disabled at
2360 	 * the hardware level. In this case, the device is wired
2361 	 * in compat mode and its first channel is always enabled,
2362 	 * but we can't rely on PCI_COMMAND_IO_ENABLE.
2363 	 * In fact, it seems that the first channel of the CMD PCI0640
2364 	 * can't be disabled.
2365 	 */
2366 
2367 #ifdef PCIIDE_CMD064x_DISABLE
2368 	if (pciide_chipen(sc, pa) == 0)
2369 		return;
2370 #endif
2371 
2372 	printf("%s: hardware does not support DMA\n",
2373 	    sc->sc_wdcdev.sc_dev.dv_xname);
2374 	sc->sc_dma_ok = 0;
2375 
2376 	sc->sc_wdcdev.channels = sc->wdc_chanarray;
2377 	sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
2378 	sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16;
2379 
2380 	for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
2381 		cmd_channel_map(pa, sc, channel);
2382 	}
2383 }
2384 
2385 void
2386 cmd0643_9_chip_map(sc, pa)
2387 	struct pciide_softc *sc;
2388 	struct pci_attach_args *pa;
2389 {
2390 	struct pciide_channel *cp;
2391 	int channel;
2392 	int rev = PCI_REVISION(
2393 	    pci_conf_read(sc->sc_pc, sc->sc_tag, PCI_CLASS_REG));
2394 
2395 	/*
2396 	 * For a CMD PCI064x, the use of PCI_COMMAND_IO_ENABLE
2397 	 * and the base address registers can be disabled at
2398 	 * the hardware level. In this case, the device is wired
2399 	 * in compat mode and its first channel is always enabled,
2400 	 * but we can't rely on PCI_COMMAND_IO_ENABLE.
2401 	 * In fact, it seems that the first channel of the CMD PCI0640
2402 	 * can't be disabled.
2403 	 */
2404 
2405 #ifdef PCIIDE_CMD064x_DISABLE
2406 	if (pciide_chipen(sc, pa) == 0)
2407 		return;
2408 #endif
2409 	printf("%s: bus-master DMA support present",
2410 	    sc->sc_wdcdev.sc_dev.dv_xname);
2411 	pciide_mapreg_dma(sc, pa);
2412 	printf("\n");
2413 	sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
2414 	    WDC_CAPABILITY_MODE;
2415 	if (sc->sc_dma_ok) {
2416 		sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_IRQACK;
2417 		switch (sc->sc_pp->ide_product) {
2418 		case PCI_PRODUCT_CMDTECH_649:
2419 		case PCI_PRODUCT_CMDTECH_648:
2420 			sc->sc_wdcdev.cap |= WDC_CAPABILITY_UDMA;
2421 			sc->sc_wdcdev.UDMA_cap = 4;
2422 			sc->sc_wdcdev.irqack = cmd646_9_irqack;
2423 			break;
2424 		case PCI_PRODUCT_CMDTECH_646:
2425 			if (rev >= CMD0646U2_REV) {
2426 				sc->sc_wdcdev.cap |= WDC_CAPABILITY_UDMA;
2427 				sc->sc_wdcdev.UDMA_cap = 2;
2428 			} else if (rev >= CMD0646U_REV) {
2429 				/*
2430 				 * Linux's driver claims that the 646U is
2431 				 * broken with UDMA. Only enable it if we
2432 				 * know what we're doing.
2433 				 */
2434 #ifdef PCIIDE_CMD0646U_ENABLEUDMA
2435 				sc->sc_wdcdev.cap |= WDC_CAPABILITY_UDMA;
2436 				sc->sc_wdcdev.UDMA_cap = 2;
2437 #endif
2438 				/* explicitly disable UDMA */
2439 				pciide_pci_write(sc->sc_pc, sc->sc_tag,
2440 				    CMD_UDMATIM(0), 0);
2441 				pciide_pci_write(sc->sc_pc, sc->sc_tag,
2442 				    CMD_UDMATIM(1), 0);
2443 			}
2444 			sc->sc_wdcdev.irqack = cmd646_9_irqack;
2445 			break;
2446 		default:
2447 			sc->sc_wdcdev.irqack = pciide_irqack;
2448 		}
2449 	}
2450 
2451 	sc->sc_wdcdev.channels = sc->wdc_chanarray;
2452 	sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
2453 	sc->sc_wdcdev.PIO_cap = 4;
2454 	sc->sc_wdcdev.DMA_cap = 2;
2455 	sc->sc_wdcdev.set_modes = cmd0643_9_setup_channel;
2456 
2457 	WDCDEBUG_PRINT(("cmd0643_9_chip_map: old timings reg 0x%x 0x%x\n",
2458 		pci_conf_read(sc->sc_pc, sc->sc_tag, 0x54),
2459 		pci_conf_read(sc->sc_pc, sc->sc_tag, 0x58)),
2460 		DEBUG_PROBE);
2461 
2462 	for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
2463 		cp = &sc->pciide_channels[channel];
2464 		cmd_channel_map(pa, sc, channel);
2465 		if (cp->hw_ok == 0)
2466 			continue;
2467 		cmd0643_9_setup_channel(&cp->wdc_channel);
2468 	}
2469 	/*
2470 	 * note - this also makes sure we clear the irq disable and reset
2471 	 * bits
2472 	 */
2473 	pciide_pci_write(sc->sc_pc, sc->sc_tag, CMD_DMA_MODE, CMD_DMA_MULTIPLE);
2474 	WDCDEBUG_PRINT(("cmd0643_9_chip_map: timings reg now 0x%x 0x%x\n",
2475 	    pci_conf_read(sc->sc_pc, sc->sc_tag, 0x54),
2476 	    pci_conf_read(sc->sc_pc, sc->sc_tag, 0x58)),
2477 	    DEBUG_PROBE);
2478 }
2479 
2480 void
2481 cmd0643_9_setup_channel(chp)
2482 	struct channel_softc *chp;
2483 {
2484 	struct ata_drive_datas *drvp;
2485 	u_int8_t tim;
2486 	u_int32_t idedma_ctl, udma_reg;
2487 	int drive;
2488 	struct pciide_channel *cp = (struct pciide_channel*)chp;
2489 	struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
2490 
2491 	idedma_ctl = 0;
2492 	/* setup DMA if needed */
2493 	pciide_channel_dma_setup(cp);
2494 
2495 	for (drive = 0; drive < 2; drive++) {
2496 		drvp = &chp->ch_drive[drive];
2497 		/* If no drive, skip */
2498 		if ((drvp->drive_flags & DRIVE) == 0)
2499 			continue;
2500 		/* add timing values, setup DMA if needed */
2501 		tim = cmd0643_9_data_tim_pio[drvp->PIO_mode];
2502 		if (drvp->drive_flags & (DRIVE_DMA | DRIVE_UDMA)) {
2503 			if (drvp->drive_flags & DRIVE_UDMA) {
2504 				/* UltraDMA on a 646U2, 0648 or 0649 */
2505 				drvp->drive_flags &= ~DRIVE_DMA;
2506 				udma_reg = pciide_pci_read(sc->sc_pc,
2507 				    sc->sc_tag, CMD_UDMATIM(chp->channel));
2508 				if (drvp->UDMA_mode > 2 &&
2509 				    (pciide_pci_read(sc->sc_pc, sc->sc_tag,
2510 				    CMD_BICSR) &
2511 				    CMD_BICSR_80(chp->channel)) == 0)
2512 					drvp->UDMA_mode = 2;
2513 				if (drvp->UDMA_mode > 2)
2514 					udma_reg &= ~CMD_UDMATIM_UDMA33(drive);
2515 				else if (sc->sc_wdcdev.UDMA_cap > 2)
2516 					udma_reg |= CMD_UDMATIM_UDMA33(drive);
2517 				udma_reg |= CMD_UDMATIM_UDMA(drive);
2518 				udma_reg &= ~(CMD_UDMATIM_TIM_MASK <<
2519 				    CMD_UDMATIM_TIM_OFF(drive));
2520 				udma_reg |=
2521 				    (cmd0646_9_tim_udma[drvp->UDMA_mode] <<
2522 				    CMD_UDMATIM_TIM_OFF(drive));
2523 				pciide_pci_write(sc->sc_pc, sc->sc_tag,
2524 				    CMD_UDMATIM(chp->channel), udma_reg);
2525 			} else {
2526 				/*
2527 				 * Use Multiword DMA.
2528 				 * Timings will be used for both PIO and DMA,
2529 				 * so adjust the DMA mode if needed.
2530 				 * If we have a 0646U2/8/9, turn off UDMA.
2531 				 */
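				/*
				 * e.g. PIO 3 with MWDMA 2 is reduced to
				 * MWDMA 1 so that the shared timing value
				 * fits both modes.
				 */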
2532 				if (sc->sc_wdcdev.cap & WDC_CAPABILITY_UDMA) {
2533 					udma_reg = pciide_pci_read(sc->sc_pc,
2534 					    sc->sc_tag,
2535 					    CMD_UDMATIM(chp->channel));
2536 					udma_reg &= ~CMD_UDMATIM_UDMA(drive);
2537 					pciide_pci_write(sc->sc_pc, sc->sc_tag,
2538 					    CMD_UDMATIM(chp->channel),
2539 					    udma_reg);
2540 				}
2541 				if (drvp->PIO_mode >= 3 &&
2542 				    (drvp->DMA_mode + 2) > drvp->PIO_mode) {
2543 					drvp->DMA_mode = drvp->PIO_mode - 2;
2544 				}
2545 				tim = cmd0643_9_data_tim_dma[drvp->DMA_mode];
2546 			}
2547 			idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
2548 		}
2549 		pciide_pci_write(sc->sc_pc, sc->sc_tag,
2550 		    CMD_DATA_TIM(chp->channel, drive), tim);
2551 	}
2552 	if (idedma_ctl != 0) {
2553 		/* Add software bits in status register */
2554 		bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
2555 		    IDEDMA_CTL + (IDEDMA_SCH_OFFSET * chp->channel),
2556 		    idedma_ctl);
2557 	}
2558 	pciide_print_modes(cp);
2559 }
2560 
2561 void
2562 cmd646_9_irqack(chp)
2563 	struct channel_softc *chp;
2564 {
2565 	u_int32_t priirq, secirq;
2566 	struct pciide_channel *cp = (struct pciide_channel*)chp;
2567 	struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
2568 
2569 	if (chp->channel == 0) {
2570 		priirq = pciide_pci_read(sc->sc_pc, sc->sc_tag, CMD_CONF);
2571 		pciide_pci_write(sc->sc_pc, sc->sc_tag, CMD_CONF, priirq);
2572 	} else {
2573 		secirq = pciide_pci_read(sc->sc_pc, sc->sc_tag, CMD_ARTTIM23);
2574 		pciide_pci_write(sc->sc_pc, sc->sc_tag, CMD_ARTTIM23, secirq);
2575 	}
2576 	pciide_irqack(chp);
2577 }
2578 
2579 void
2580 cy693_chip_map(sc, pa)
2581 	struct pciide_softc *sc;
2582 	struct pci_attach_args *pa;
2583 {
2584 	struct pciide_channel *cp;
2585 	pcireg_t interface = PCI_INTERFACE(pa->pa_class);
2586 	bus_size_t cmdsize, ctlsize;
2587 
2588 	if (pciide_chipen(sc, pa) == 0)
2589 		return;
2590 	/*
2591 	 * This chip has 2 PCI IDE functions, one for primary and one for
2592 	 * secondary, so we need to call pciide_mapregs_compat() with
2593 	 * the real channel.
2594 	 */
2595 	if (pa->pa_function == 1) {
2596 		sc->sc_cy_compatchan = 0;
2597 	} else if (pa->pa_function == 2) {
2598 		sc->sc_cy_compatchan = 1;
2599 	} else {
2600 		printf("%s: unexpected PCI function %d\n",
2601 		    sc->sc_wdcdev.sc_dev.dv_xname, pa->pa_function);
2602 		return;
2603 	}
2604 	if (interface & PCIIDE_INTERFACE_BUS_MASTER_DMA) {
2605 		printf("%s: bus-master DMA support present",
2606 		    sc->sc_wdcdev.sc_dev.dv_xname);
2607 		pciide_mapreg_dma(sc, pa);
2608 	} else {
2609 		printf("%s: hardware does not support DMA",
2610 		    sc->sc_wdcdev.sc_dev.dv_xname);
2611 		sc->sc_dma_ok = 0;
2612 	}
2613 	printf("\n");
2614 
2615 	sc->sc_cy_handle = cy82c693_init(pa->pa_iot);
2616 	if (sc->sc_cy_handle == NULL) {
2617 		printf("%s: unable to map hyperCache control registers\n",
2618 		    sc->sc_wdcdev.sc_dev.dv_xname);
2619 		sc->sc_dma_ok = 0;
2620 	}
2621 
2622 	sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
2623 	    WDC_CAPABILITY_MODE;
2624 	if (sc->sc_dma_ok) {
2625 		sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_IRQACK;
2626 		sc->sc_wdcdev.irqack = pciide_irqack;
2627 	}
2628 	sc->sc_wdcdev.PIO_cap = 4;
2629 	sc->sc_wdcdev.DMA_cap = 2;
2630 	sc->sc_wdcdev.set_modes = cy693_setup_channel;
2631 
2632 	sc->sc_wdcdev.channels = sc->wdc_chanarray;
2633 	sc->sc_wdcdev.nchannels = 1;
2634 
2635 	/* Only one channel for this chip; if we are here it's enabled */
2636 	cp = &sc->pciide_channels[0];
2637 	sc->wdc_chanarray[0] = &cp->wdc_channel;
2638 	cp->name = PCIIDE_CHANNEL_NAME(0);
2639 	cp->wdc_channel.channel = 0;
2640 	cp->wdc_channel.wdc = &sc->sc_wdcdev;
2641 	cp->wdc_channel.ch_queue =
2642 	    malloc(sizeof(struct channel_queue), M_DEVBUF, M_NOWAIT);
2643 	if (cp->wdc_channel.ch_queue == NULL) {
2644 		printf("%s primary channel: "
2645 		    "can't allocate memory for command queue\n",
2646 		    sc->sc_wdcdev.sc_dev.dv_xname);
2647 		return;
2648 	}
2649 	printf("%s: primary channel %s to ",
2650 	    sc->sc_wdcdev.sc_dev.dv_xname,
2651 	    (interface & PCIIDE_INTERFACE_SETTABLE(0)) ?
2652 	    "configured" : "wired");
2653 	if (interface & PCIIDE_INTERFACE_PCI(0)) {
2654 		printf("native-PCI");
2655 		cp->hw_ok = pciide_mapregs_native(pa, cp, &cmdsize, &ctlsize,
2656 		    pciide_pci_intr);
2657 	} else {
2658 		printf("compatibility");
2659 		cp->hw_ok = pciide_mapregs_compat(pa, cp, sc->sc_cy_compatchan,
2660 		    &cmdsize, &ctlsize);
2661 	}
2662 	printf(" mode\n");
2663 	cp->wdc_channel.data32iot = cp->wdc_channel.cmd_iot;
2664 	cp->wdc_channel.data32ioh = cp->wdc_channel.cmd_ioh;
2665 	wdcattach(&cp->wdc_channel);
2666 	if (pciide_chan_candisable(cp)) {
2667 		pci_conf_write(sc->sc_pc, sc->sc_tag,
2668 		    PCI_COMMAND_STATUS_REG, 0);
2669 	}
2670 	pciide_map_compat_intr(pa, cp, sc->sc_cy_compatchan, interface);
2671 	if (cp->hw_ok == 0)
2672 		return;
2673 	WDCDEBUG_PRINT(("cy693_chip_map: old timings reg 0x%x\n",
2674 	    pci_conf_read(sc->sc_pc, sc->sc_tag, CY_CMD_CTRL)),DEBUG_PROBE);
2675 	cy693_setup_channel(&cp->wdc_channel);
2676 	WDCDEBUG_PRINT(("cy693_chip_map: new timings reg 0x%x\n",
2677 	    pci_conf_read(sc->sc_pc, sc->sc_tag, CY_CMD_CTRL)), DEBUG_PROBE);
2678 }
2679 
2680 void
2681 cy693_setup_channel(chp)
2682 	struct channel_softc *chp;
2683 {
2684 	struct ata_drive_datas *drvp;
2685 	int drive;
2686 	u_int32_t cy_cmd_ctrl;
2687 	u_int32_t idedma_ctl;
2688 	struct pciide_channel *cp = (struct pciide_channel*)chp;
2689 	struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
2690 	int dma_mode = -1;
2691 
2692 	cy_cmd_ctrl = idedma_ctl = 0;
2693 
2694 	/* setup DMA if needed */
2695 	pciide_channel_dma_setup(cp);
2696 
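	/*
	 * The chip appears to have a single multiword DMA timing shared by
	 * both drives (see the cy82c693_write() call below), so the slowest
	 * DMA mode requested by either drive is applied to both.
	 */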
2697 	for (drive = 0; drive < 2; drive++) {
2698 		drvp = &chp->ch_drive[drive];
2699 		/* If no drive, skip */
2700 		if ((drvp->drive_flags & DRIVE) == 0)
2701 			continue;
2702 		/* add timing values, setup DMA if needed */
2703 		if (drvp->drive_flags & DRIVE_DMA) {
2704 			idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
2705 			/* use Multiword DMA */
2706 			if (dma_mode == -1 || dma_mode > drvp->DMA_mode)
2707 				dma_mode = drvp->DMA_mode;
2708 		}
2709 		cy_cmd_ctrl |= (cy_pio_pulse[drvp->PIO_mode] <<
2710 		    CY_CMD_CTRL_IOW_PULSE_OFF(drive));
2711 		cy_cmd_ctrl |= (cy_pio_rec[drvp->PIO_mode] <<
2712 		    CY_CMD_CTRL_IOW_REC_OFF(drive));
2713 		cy_cmd_ctrl |= (cy_pio_pulse[drvp->PIO_mode] <<
2714 		    CY_CMD_CTRL_IOR_PULSE_OFF(drive));
2715 		cy_cmd_ctrl |= (cy_pio_rec[drvp->PIO_mode] <<
2716 		    CY_CMD_CTRL_IOR_REC_OFF(drive));
2717 	}
2718 	pci_conf_write(sc->sc_pc, sc->sc_tag, CY_CMD_CTRL, cy_cmd_ctrl);
2719 	chp->ch_drive[0].DMA_mode = dma_mode;
2720 	chp->ch_drive[1].DMA_mode = dma_mode;
2721 
2722 	if (dma_mode == -1)
2723 		dma_mode = 0;
2724 
2725 	if (sc->sc_cy_handle != NULL) {
2726 		/* Note: `multiple' is implied. */
2727 		cy82c693_write(sc->sc_cy_handle,
2728 		    (sc->sc_cy_compatchan == 0) ?
2729 		    CY_DMA_IDX_PRIMARY : CY_DMA_IDX_SECONDARY, dma_mode);
2730 	}
2731 
2732 	pciide_print_modes(cp);
2733 
2734 	if (idedma_ctl != 0) {
2735 		/* Add software bits in status register */
2736 		bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
2737 		    IDEDMA_CTL, idedma_ctl);
2738 	}
2739 }
2740 
2741 static int
2742 sis_hostbr_match(pa)
2743 	struct pci_attach_args *pa;
2744 {
2745 	return ((PCI_VENDOR(pa->pa_id) == PCI_VENDOR_SIS) &&
2746 	   ((PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_SIS_645) ||
2747 	    (PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_SIS_650) ||
2748 	    (PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_SIS_730) ||
2749 	    (PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_SIS_735)));
2750 }
2751 
2752 void
2753 sis_chip_map(sc, pa)
2754 	struct pciide_softc *sc;
2755 	struct pci_attach_args *pa;
2756 {
2757 	struct pciide_channel *cp;
2758 	int channel;
2759 	u_int8_t sis_ctr0 = pciide_pci_read(sc->sc_pc, sc->sc_tag, SIS_CTRL0);
2760 	pcireg_t interface = PCI_INTERFACE(pa->pa_class);
2761 	pcireg_t rev = PCI_REVISION(pa->pa_class);
2762 	bus_size_t cmdsize, ctlsize;
2763 	pcitag_t pchb_tag;
2764 	pcireg_t pchb_id, pchb_class;
2765 
2766 	if (pciide_chipen(sc, pa) == 0)
2767 		return;
2768 	printf("%s: bus-master DMA support present",
2769 	    sc->sc_wdcdev.sc_dev.dv_xname);
2770 	pciide_mapreg_dma(sc, pa);
2771 	printf("\n");
2772 
2773 	/* get a PCI tag for the host bridge (function 0 of the same device) */
2774 	pchb_tag = pci_make_tag(pa->pa_pc, pa->pa_bus, pa->pa_device, 0);
2775 	/* and read the ID and revision of the host bridge */
2776 	pchb_id = pci_conf_read(sc->sc_pc, pchb_tag, PCI_ID_REG);
2777 	pchb_class = pci_conf_read(sc->sc_pc, pchb_tag, PCI_CLASS_REG);
2778 
2779 	sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
2780 	    WDC_CAPABILITY_MODE;
2781 	if (sc->sc_dma_ok) {
2782 		sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_IRQACK;
2783 		sc->sc_wdcdev.irqack = pciide_irqack;
2784 		/*
2785 		 * Controllers associated with a rev 0x2 530 Host to PCI Bridge
2786 		 * have problems with UDMA (info provided by Christos).
2787 		 */
2788 		if (rev >= 0xd0 &&
2789 		    (PCI_PRODUCT(pchb_id) != PCI_PRODUCT_SIS_530HB ||
2790 		    PCI_REVISION(pchb_class) >= 0x03))
2791 			sc->sc_wdcdev.cap |= WDC_CAPABILITY_UDMA;
2792 	}
2793 
2794 	sc->sc_wdcdev.PIO_cap = 4;
2795 	sc->sc_wdcdev.DMA_cap = 2;
2796 	if (sc->sc_wdcdev.cap & WDC_CAPABILITY_UDMA)
2797 		/*
2798 		 * Use UDMA/100 when a SiS 645/650/730/735 host bridge is
2799 		 * present, UDMA/33 with other chipsets.
2800 		 */
2801 		sc->sc_wdcdev.UDMA_cap =
2802 		    pci_find_device(pa, sis_hostbr_match) ? 5 : 2;
2803 	sc->sc_wdcdev.set_modes = sis_setup_channel;
2804 
2805 	sc->sc_wdcdev.channels = sc->wdc_chanarray;
2806 	sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
2807 
2808 	pciide_pci_write(sc->sc_pc, sc->sc_tag, SIS_MISC,
2809 	    pciide_pci_read(sc->sc_pc, sc->sc_tag, SIS_MISC) |
2810 	    SIS_MISC_TIM_SEL | SIS_MISC_FIFO_SIZE);
2811 
2812 	for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
2813 		cp = &sc->pciide_channels[channel];
2814 		if (pciide_chansetup(sc, channel, interface) == 0)
2815 			continue;
2816 		if ((channel == 0 && (sis_ctr0 & SIS_CTRL0_CHAN0_EN) == 0) ||
2817 		    (channel == 1 && (sis_ctr0 & SIS_CTRL0_CHAN1_EN) == 0)) {
2818 			printf("%s: %s channel ignored (disabled)\n",
2819 			    sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
2820 			continue;
2821 		}
2822 		pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize,
2823 		    pciide_pci_intr);
2824 		if (cp->hw_ok == 0)
2825 			continue;
2826 		if (pciide_chan_candisable(cp)) {
2827 			if (channel == 0)
2828 				sis_ctr0 &= ~SIS_CTRL0_CHAN0_EN;
2829 			else
2830 				sis_ctr0 &= ~SIS_CTRL0_CHAN1_EN;
2831 			pciide_pci_write(sc->sc_pc, sc->sc_tag, SIS_CTRL0,
2832 			    sis_ctr0);
2833 		}
2834 		pciide_map_compat_intr(pa, cp, channel, interface);
2835 		if (cp->hw_ok == 0)
2836 			continue;
2837 		sis_setup_channel(&cp->wdc_channel);
2838 	}
2839 }
2840 
2841 void
2842 sis_setup_channel(chp)
2843 	struct channel_softc *chp;
2844 {
2845 	struct ata_drive_datas *drvp;
2846 	int drive;
2847 	u_int32_t sis_tim;
2848 	u_int32_t idedma_ctl;
2849 	struct pciide_channel *cp = (struct pciide_channel*)chp;
2850 	struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
2851 
2852 	WDCDEBUG_PRINT(("sis_setup_channel: old timings reg for "
2853 	    "channel %d 0x%x\n", chp->channel,
2854 	    pci_conf_read(sc->sc_pc, sc->sc_tag, SIS_TIM(chp->channel))),
2855 	    DEBUG_PROBE);
2856 	sis_tim = 0;
2857 	idedma_ctl = 0;
2858 	/* setup DMA if needed */
2859 	pciide_channel_dma_setup(cp);
2860 
2861 	for (drive = 0; drive < 2; drive++) {
2862 		drvp = &chp->ch_drive[drive];
2863 		/* If no drive, skip */
2864 		if ((drvp->drive_flags & DRIVE) == 0)
2865 			continue;
2866 		/* add timing values, setup DMA if needed */
2867 		if ((drvp->drive_flags & DRIVE_DMA) == 0 &&
2868 		    (drvp->drive_flags & DRIVE_UDMA) == 0)
2869 			goto pio;
2870 
2871 		if (drvp->drive_flags & DRIVE_UDMA) {
2872 			/* use Ultra/DMA */
2873 			drvp->drive_flags &= ~DRIVE_DMA;
2874 			sis_tim |= sis_udma_tim[drvp->UDMA_mode] <<
2875 			    SIS_TIM_UDMA_TIME_OFF(drive);
2876 			sis_tim |= SIS_TIM_UDMA_EN(drive);
2877 		} else {
2878 			/*
2879 			 * Use Multiword DMA.
2880 			 * Timings will be used for both PIO and DMA,
2881 			 * so adjust the DMA mode if needed.
2882 			 */
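			/*
			 * e.g. PIO 4 with MWDMA 1 becomes PIO 3 / MWDMA 1;
			 * if the result would be MWDMA 0, both modes are
			 * dropped to 0.
			 */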
2883 			if (drvp->PIO_mode > (drvp->DMA_mode + 2))
2884 				drvp->PIO_mode = drvp->DMA_mode + 2;
2885 			if (drvp->DMA_mode + 2 > (drvp->PIO_mode))
2886 				drvp->DMA_mode = (drvp->PIO_mode > 2) ?
2887 				    drvp->PIO_mode - 2 : 0;
2888 			if (drvp->DMA_mode == 0)
2889 				drvp->PIO_mode = 0;
2890 		}
2891 		idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
2892 pio:		sis_tim |= sis_pio_act[drvp->PIO_mode] <<
2893 		    SIS_TIM_ACT_OFF(drive);
2894 		sis_tim |= sis_pio_rec[drvp->PIO_mode] <<
2895 		    SIS_TIM_REC_OFF(drive);
2896 	}
2897 	WDCDEBUG_PRINT(("sis_setup_channel: new timings reg for "
2898 	    "channel %d 0x%x\n", chp->channel, sis_tim), DEBUG_PROBE);
2899 	pci_conf_write(sc->sc_pc, sc->sc_tag, SIS_TIM(chp->channel), sis_tim);
2900 	if (idedma_ctl != 0) {
2901 		/* Add software bits in status register */
2902 		bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
2903 		    IDEDMA_CTL, idedma_ctl);
2904 	}
2905 	pciide_print_modes(cp);
2906 }
2907 
2908 static int
2909 acer_isabr_match(pa)
2910 	struct pci_attach_args *pa;
2911 {
2912 	return ((PCI_VENDOR(pa->pa_id) == PCI_VENDOR_ALI) &&
2913 	   (PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_ALI_M1543));
2914 }
2915 
2916 void
2917 acer_chip_map(sc, pa)
2918 	struct pciide_softc *sc;
2919 	struct pci_attach_args *pa;
2920 {
2921 	struct pci_attach_args isa_pa;
2922 	struct pciide_channel *cp;
2923 	int channel;
2924 	pcireg_t cr, interface;
2925 	bus_size_t cmdsize, ctlsize;
2926 	pcireg_t rev = PCI_REVISION(pa->pa_class);
2927 
2928 	if (pciide_chipen(sc, pa) == 0)
2929 		return;
2930 	printf("%s: bus-master DMA support present",
2931 	    sc->sc_wdcdev.sc_dev.dv_xname);
2932 	pciide_mapreg_dma(sc, pa);
2933 	printf("\n");
2934 	sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
2935 	    WDC_CAPABILITY_MODE;
2936 	if (sc->sc_dma_ok) {
2937 		sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA;
2938 		if (rev >= 0x20) {
2939 			sc->sc_wdcdev.cap |= WDC_CAPABILITY_UDMA;
2940 			if (rev >= 0xC4)
2941 				sc->sc_wdcdev.UDMA_cap = 5;
2942 			else if (rev >= 0xC2)
2943 				sc->sc_wdcdev.UDMA_cap = 4;
2944 			else
2945 				sc->sc_wdcdev.UDMA_cap = 2;
2946 		}
2947 		sc->sc_wdcdev.cap |= WDC_CAPABILITY_IRQACK;
2948 		sc->sc_wdcdev.irqack = pciide_irqack;
2949 	}
2950 
2951 	sc->sc_wdcdev.PIO_cap = 4;
2952 	sc->sc_wdcdev.DMA_cap = 2;
2953 	sc->sc_wdcdev.set_modes = acer_setup_channel;
2954 	sc->sc_wdcdev.channels = sc->wdc_chanarray;
2955 	sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
2956 
2957 	pciide_pci_write(sc->sc_pc, sc->sc_tag, ACER_CDRC,
2958 	    (pciide_pci_read(sc->sc_pc, sc->sc_tag, ACER_CDRC) |
2959 		ACER_CDRC_DMA_EN) & ~ACER_CDRC_FIFO_DISABLE);
2960 
2961 	/* Enable "microsoft register bits" R/W. */
2962 	pciide_pci_write(sc->sc_pc, sc->sc_tag, ACER_CCAR3,
2963 	    pciide_pci_read(sc->sc_pc, sc->sc_tag, ACER_CCAR3) | ACER_CCAR3_PI);
2964 	pciide_pci_write(sc->sc_pc, sc->sc_tag, ACER_CCAR1,
2965 	    pciide_pci_read(sc->sc_pc, sc->sc_tag, ACER_CCAR1) &
2966 	    ~(ACER_CHANSTATUS_RO|PCIIDE_CHAN_RO(0)|PCIIDE_CHAN_RO(1)));
2967 	pciide_pci_write(sc->sc_pc, sc->sc_tag, ACER_CCAR2,
2968 	    pciide_pci_read(sc->sc_pc, sc->sc_tag, ACER_CCAR2) &
2969 	    ~ACER_CHANSTATUSREGS_RO);
2970 	cr = pci_conf_read(sc->sc_pc, sc->sc_tag, PCI_CLASS_REG);
2971 	cr |= (PCIIDE_CHANSTATUS_EN << PCI_INTERFACE_SHIFT);
2972 	pci_conf_write(sc->sc_pc, sc->sc_tag, PCI_CLASS_REG, cr);
2973 	/* Don't use cr, re-read the real register content instead */
2974 	interface = PCI_INTERFACE(pci_conf_read(sc->sc_pc, sc->sc_tag,
2975 	    PCI_CLASS_REG));
2976 
2977 	/* From Linux: enable "Cable Detection" */
2978 	if (rev >= 0xC2) {
2979 		pciide_pci_write(sc->sc_pc, sc->sc_tag, ACER_0x4B,
2980 		    pciide_pci_read(sc->sc_pc, sc->sc_tag, ACER_0x4B)
2981 		    | ACER_0x4B_CDETECT);
2982 		/* set south-bridge's enable bit, m1533, 0x79 */
2983 		if (pci_find_device(&isa_pa, acer_isabr_match) == 0) {
2984 			printf("%s: can't find PCI/ISA bridge, downgrading "
2985 			    "to Ultra/33\n", sc->sc_wdcdev.sc_dev.dv_xname);
2986 			sc->sc_wdcdev.UDMA_cap = 2;
2987 		} else {
2988 			if (rev == 0xC2)
2989 				/* 1543C-B0 (m1533, 0x79, bit 2) */
2990 				pciide_pci_write(isa_pa.pa_pc, isa_pa.pa_tag,
2991 				    ACER_0x79,
2992 				    pciide_pci_read(isa_pa.pa_pc, isa_pa.pa_tag,
2993 					ACER_0x79)
2994 				    | ACER_0x79_REVC2_EN);
2995 			else
2996 				/* 1553/1535 (m1533, 0x79, bit 1) */
2997 				pciide_pci_write(isa_pa.pa_pc, isa_pa.pa_tag,
2998 				    ACER_0x79,
2999 				    pciide_pci_read(isa_pa.pa_pc, isa_pa.pa_tag,
3000 					ACER_0x79)
3001 				    | ACER_0x79_EN);
3002 		}
3003 	}
3004 
3005 	for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
3006 		cp = &sc->pciide_channels[channel];
3007 		if (pciide_chansetup(sc, channel, interface) == 0)
3008 			continue;
3009 		if ((interface & PCIIDE_CHAN_EN(channel)) == 0) {
3010 			printf("%s: %s channel ignored (disabled)\n",
3011 			    sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
3012 			continue;
3013 		}
3014 		/* newer controllers seem to lack the ACER_CHIDS. Sigh */
3015 		pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize,
3016 		     (rev >= 0xC2) ? pciide_pci_intr : acer_pci_intr);
3017 		if (cp->hw_ok == 0)
3018 			continue;
3019 		if (pciide_chan_candisable(cp)) {
3020 			cr &= ~(PCIIDE_CHAN_EN(channel) << PCI_INTERFACE_SHIFT);
3021 			pci_conf_write(sc->sc_pc, sc->sc_tag,
3022 			    PCI_CLASS_REG, cr);
3023 		}
3024 		pciide_map_compat_intr(pa, cp, channel, interface);
3025 		acer_setup_channel(&cp->wdc_channel);
3026 	}
3027 }
3028 
3029 void
3030 acer_setup_channel(chp)
3031 	struct channel_softc *chp;
3032 {
3033 	struct ata_drive_datas *drvp;
3034 	int drive;
3035 	u_int32_t acer_fifo_udma;
3036 	u_int32_t idedma_ctl;
3037 	struct pciide_channel *cp = (struct pciide_channel*)chp;
3038 	struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
3039 
3040 	idedma_ctl = 0;
3041 	acer_fifo_udma = pci_conf_read(sc->sc_pc, sc->sc_tag, ACER_FTH_UDMA);
3042 	WDCDEBUG_PRINT(("acer_setup_channel: old fifo/udma reg 0x%x\n",
3043 	    acer_fifo_udma), DEBUG_PROBE);
3044 	/* setup DMA if needed */
3045 	pciide_channel_dma_setup(cp);
3046 
3047 	if ((chp->ch_drive[0].drive_flags | chp->ch_drive[1].drive_flags) &
3048 	    DRIVE_UDMA) { /* check for 80-pin cable */
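		/*
		 * When this bit is set the code limits both drives to
		 * UDMA 2 (Ultra/33), so a set bit presumably indicates
		 * that an 80-conductor cable is not present.
		 */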
3049 		if (pciide_pci_read(sc->sc_pc, sc->sc_tag, ACER_0x4A) &
3050 		    ACER_0x4A_80PIN(chp->channel)) {
3051 			if (chp->ch_drive[0].UDMA_mode > 2)
3052 				chp->ch_drive[0].UDMA_mode = 2;
3053 			if (chp->ch_drive[1].UDMA_mode > 2)
3054 				chp->ch_drive[1].UDMA_mode = 2;
3055 		}
3056 	}
3057 
3058 	for (drive = 0; drive < 2; drive++) {
3059 		drvp = &chp->ch_drive[drive];
3060 		/* If no drive, skip */
3061 		if ((drvp->drive_flags & DRIVE) == 0)
3062 			continue;
3063 		WDCDEBUG_PRINT(("acer_setup_channel: old timings reg for "
3064 		    "channel %d drive %d 0x%x\n", chp->channel, drive,
3065 		    pciide_pci_read(sc->sc_pc, sc->sc_tag,
3066 		    ACER_IDETIM(chp->channel, drive))), DEBUG_PROBE);
3067 		/* clear FIFO/DMA mode */
3068 		acer_fifo_udma &= ~(ACER_FTH_OPL(chp->channel, drive, 0x3) |
3069 		    ACER_UDMA_EN(chp->channel, drive) |
3070 		    ACER_UDMA_TIM(chp->channel, drive, 0x7));
3071 
3072 		/* add timing values, setup DMA if needed */
3073 		if ((drvp->drive_flags & DRIVE_DMA) == 0 &&
3074 		    (drvp->drive_flags & DRIVE_UDMA) == 0) {
3075 			acer_fifo_udma |=
3076 			    ACER_FTH_OPL(chp->channel, drive, 0x1);
3077 			goto pio;
3078 		}
3079 
3080 		acer_fifo_udma |= ACER_FTH_OPL(chp->channel, drive, 0x2);
3081 		if (drvp->drive_flags & DRIVE_UDMA) {
3082 			/* use Ultra/DMA */
3083 			drvp->drive_flags &= ~DRIVE_DMA;
3084 			acer_fifo_udma |= ACER_UDMA_EN(chp->channel, drive);
3085 			acer_fifo_udma |=
3086 			    ACER_UDMA_TIM(chp->channel, drive,
3087 				acer_udma[drvp->UDMA_mode]);
3088 			/* XXX disable if one drive < UDMA3 ? */
3089 			if (drvp->UDMA_mode >= 3) {
3090 				pciide_pci_write(sc->sc_pc, sc->sc_tag,
3091 				    ACER_0x4B,
3092 				    pciide_pci_read(sc->sc_pc, sc->sc_tag,
3093 					ACER_0x4B) | ACER_0x4B_UDMA66);
3094 			}
3095 		} else {
3096 			/*
3097 			 * Use Multiword DMA.
3098 			 * Timings will be used for both PIO and DMA,
3099 			 * so adjust the DMA mode if needed.
3100 			 */
3101 			if (drvp->PIO_mode > (drvp->DMA_mode + 2))
3102 				drvp->PIO_mode = drvp->DMA_mode + 2;
3103 			if (drvp->DMA_mode + 2 > (drvp->PIO_mode))
3104 				drvp->DMA_mode = (drvp->PIO_mode > 2) ?
3105 				    drvp->PIO_mode - 2 : 0;
3106 			if (drvp->DMA_mode == 0)
3107 				drvp->PIO_mode = 0;
3108 		}
3109 		idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
3110 pio:		pciide_pci_write(sc->sc_pc, sc->sc_tag,
3111 		    ACER_IDETIM(chp->channel, drive),
3112 		    acer_pio[drvp->PIO_mode]);
3113 	}
3114 	WDCDEBUG_PRINT(("acer_setup_channel: new fifo/udma reg 0x%x\n",
3115 	    acer_fifo_udma), DEBUG_PROBE);
3116 	pci_conf_write(sc->sc_pc, sc->sc_tag, ACER_FTH_UDMA, acer_fifo_udma);
3117 	if (idedma_ctl != 0) {
3118 		/* Add software bits in status register */
3119 		bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
3120 		    IDEDMA_CTL, idedma_ctl);
3121 	}
3122 	pciide_print_modes(cp);
3123 }
3124 
3125 int
3126 acer_pci_intr(arg)
3127 	void *arg;
3128 {
3129 	struct pciide_softc *sc = arg;
3130 	struct pciide_channel *cp;
3131 	struct channel_softc *wdc_cp;
3132 	int i, rv, crv;
3133 	u_int32_t chids;
3134 
3135 	rv = 0;
3136 	chids = pciide_pci_read(sc->sc_pc, sc->sc_tag, ACER_CHIDS);
3137 	for (i = 0; i < sc->sc_wdcdev.nchannels; i++) {
3138 		cp = &sc->pciide_channels[i];
3139 		wdc_cp = &cp->wdc_channel;
3140 		/* If a compat channel, skip. */
3141 		if (cp->compat)
3142 			continue;
3143 		if (chids & ACER_CHIDS_INT(i)) {
3144 			crv = wdcintr(wdc_cp);
3145 			if (crv == 0)
3146 				printf("%s:%d: bogus intr\n",
3147 				    sc->sc_wdcdev.sc_dev.dv_xname, i);
3148 			else
3149 				rv = 1;
3150 		}
3151 	}
3152 	return rv;
3153 }
3154 
3155 void
3156 hpt_chip_map(sc, pa)
3157 	struct pciide_softc *sc;
3158 	struct pci_attach_args *pa;
3159 {
3160 	struct pciide_channel *cp;
3161 	int i, compatchan, revision;
3162 	pcireg_t interface;
3163 	bus_size_t cmdsize, ctlsize;
3164 
3165 	if (pciide_chipen(sc, pa) == 0)
3166 		return;
3167 	revision = PCI_REVISION(pa->pa_class);
3168 	printf(": Triones/Highpoint ");
3169 	if (revision == HPT370_REV)
3170 		printf("HPT370 IDE Controller\n");
3171 	else if (revision == HPT370A_REV)
3172 		printf("HPT370A IDE Controller\n");
3173 	else if (revision == HPT366_REV)
3174 		printf("HPT366 IDE Controller\n");
3175 	else
3176 		printf("unknown HPT IDE controller rev %d\n", revision);
3177 
3178 	/*
3179 	 * When the chip is in native mode it identifies itself as a
3180 	 * 'misc mass storage' device. Fake the interface in this case.
3181 	 */
3182 	if (PCI_SUBCLASS(pa->pa_class) == PCI_SUBCLASS_MASS_STORAGE_IDE) {
3183 		interface = PCI_INTERFACE(pa->pa_class);
3184 	} else {
3185 		interface = PCIIDE_INTERFACE_BUS_MASTER_DMA |
3186 		    PCIIDE_INTERFACE_PCI(0);
3187 		if (revision == HPT370_REV || revision == HPT370A_REV)
3188 			interface |= PCIIDE_INTERFACE_PCI(1);
3189 	}
3190 
3191 	printf("%s: bus-master DMA support present",
3192 		sc->sc_wdcdev.sc_dev.dv_xname);
3193 	pciide_mapreg_dma(sc, pa);
3194 	printf("\n");
3195 	sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
3196 	    WDC_CAPABILITY_MODE;
3197 	if (sc->sc_dma_ok) {
3198 		sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_UDMA;
3199 		sc->sc_wdcdev.cap |= WDC_CAPABILITY_IRQACK;
3200 		sc->sc_wdcdev.irqack = pciide_irqack;
3201 	}
3202 	sc->sc_wdcdev.PIO_cap = 4;
3203 	sc->sc_wdcdev.DMA_cap = 2;
3204 
3205 	sc->sc_wdcdev.set_modes = hpt_setup_channel;
3206 	sc->sc_wdcdev.channels = sc->wdc_chanarray;
3207 	if (revision == HPT366_REV) {
3208 		sc->sc_wdcdev.UDMA_cap = 4;
3209 		/*
3210 		 * The 366 has 2 PCI IDE functions, one for primary and one
3211 		 * for secondary. So we need to call pciide_mapregs_compat()
3212 		 * with the real channel
3213 		 */
3214 		if (pa->pa_function == 0) {
3215 			compatchan = 0;
3216 		} else if (pa->pa_function == 1) {
3217 			compatchan = 1;
3218 		} else {
3219 			printf("%s: unexpected PCI function %d\n",
3220 			    sc->sc_wdcdev.sc_dev.dv_xname, pa->pa_function);
3221 			return;
3222 		}
3223 		sc->sc_wdcdev.nchannels = 1;
3224 	} else {
3225 		sc->sc_wdcdev.nchannels = 2;
3226 		sc->sc_wdcdev.UDMA_cap = 5;
3227 	}
3228 	for (i = 0; i < sc->sc_wdcdev.nchannels; i++) {
3229 		cp = &sc->pciide_channels[i];
3230 		if (sc->sc_wdcdev.nchannels > 1) {
3231 			compatchan = i;
3232 			if ((pciide_pci_read(sc->sc_pc, sc->sc_tag,
3233 			   HPT370_CTRL1(i)) & HPT370_CTRL1_EN) == 0) {
3234 				printf("%s: %s channel ignored (disabled)\n",
3235 				    sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
3236 				continue;
3237 			}
3238 		}
3239 		if (pciide_chansetup(sc, i, interface) == 0)
3240 			continue;
3241 		if (interface & PCIIDE_INTERFACE_PCI(i)) {
3242 			cp->hw_ok = pciide_mapregs_native(pa, cp, &cmdsize,
3243 			    &ctlsize, hpt_pci_intr);
3244 		} else {
3245 			cp->hw_ok = pciide_mapregs_compat(pa, cp, compatchan,
3246 			    &cmdsize, &ctlsize);
3247 		}
3248 		if (cp->hw_ok == 0)
3249 			return;
3250 		cp->wdc_channel.data32iot = cp->wdc_channel.cmd_iot;
3251 		cp->wdc_channel.data32ioh = cp->wdc_channel.cmd_ioh;
3252 		wdcattach(&cp->wdc_channel);
3253 		hpt_setup_channel(&cp->wdc_channel);
3254 	}
3255 	if (revision == HPT370_REV || revision == HPT370A_REV) {
3256 		/*
3257 		 * The HPT370 and HPT370A have a bit to disable interrupts;
3258 		 * make sure to clear it.
3259 		 */
3260 		pciide_pci_write(sc->sc_pc, sc->sc_tag, HPT_CSEL,
3261 		    pciide_pci_read(sc->sc_pc, sc->sc_tag, HPT_CSEL) &
3262 		    ~HPT_CSEL_IRQDIS);
3263 	}
3264 	return;
3265 }
3266 
3267 void
3268 hpt_setup_channel(chp)
3269 	struct channel_softc *chp;
3270 {
3271 	struct ata_drive_datas *drvp;
3272 	int drive;
3273 	int cable;
3274 	u_int32_t before, after;
3275 	u_int32_t idedma_ctl;
3276 	struct pciide_channel *cp = (struct pciide_channel*)chp;
3277 	struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
3278 
3279 	cable = pciide_pci_read(sc->sc_pc, sc->sc_tag, HPT_CSEL);
3280 
3281 	/* setup DMA if needed */
3282 	pciide_channel_dma_setup(cp);
3283 
3284 	idedma_ctl = 0;
3285 
3286 	/* Per drive settings */
3287 	for (drive = 0; drive < 2; drive++) {
3288 		drvp = &chp->ch_drive[drive];
3289 		/* If no drive, skip */
3290 		if ((drvp->drive_flags & DRIVE) == 0)
3291 			continue;
3292 		before = pci_conf_read(sc->sc_pc, sc->sc_tag,
3293 					HPT_IDETIM(chp->channel, drive));
3294 
3295 		/* add timing values, setup DMA if needed */
3296 		if (drvp->drive_flags & DRIVE_UDMA) {
3297 			/* use Ultra/DMA */
3298 			drvp->drive_flags &= ~DRIVE_DMA;
3299 			if ((cable & HPT_CSEL_CBLID(chp->channel)) != 0 &&
3300 			    drvp->UDMA_mode > 2)
3301 				drvp->UDMA_mode = 2;
3302 			after = (sc->sc_wdcdev.nchannels == 2) ?
3303 			    hpt370_udma[drvp->UDMA_mode] :
3304 			    hpt366_udma[drvp->UDMA_mode];
3305 			idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
3306 		} else if (drvp->drive_flags & DRIVE_DMA) {
3307 			/*
3308 			 * use Multiword DMA.
3309 			 * Timings will be used for both PIO and DMA, so adjust
3310 			 * DMA mode if needed
3311 			 */
3312 			if (drvp->PIO_mode >= 3 &&
3313 			    (drvp->DMA_mode + 2) > drvp->PIO_mode) {
3314 				drvp->DMA_mode = drvp->PIO_mode - 2;
3315 			}
3316 			after = (sc->sc_wdcdev.nchannels == 2) ?
3317 			    hpt370_dma[drvp->DMA_mode] :
3318 			    hpt366_dma[drvp->DMA_mode];
3319 			idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
3320 		} else {
3321 			/* PIO only */
3322 			after = (sc->sc_wdcdev.nchannels == 2) ?
3323 			    hpt370_pio[drvp->PIO_mode] :
3324 			    hpt366_pio[drvp->PIO_mode];
3325 		}
3326 		pci_conf_write(sc->sc_pc, sc->sc_tag,
3327 		    HPT_IDETIM(chp->channel, drive), after);
3328 		WDCDEBUG_PRINT(("%s: bus speed register set to 0x%08x "
3329 		    "(BIOS 0x%08x)\n", drvp->drv_softc->dv_xname,
3330 		    after, before), DEBUG_PROBE);
3331 	}
3332 	if (idedma_ctl != 0) {
3333 		/* Add software bits in status register */
3334 		bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
3335 		    IDEDMA_CTL, idedma_ctl);
3336 	}
3337 	pciide_print_modes(cp);
3338 }
3339 
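/*
 * Shared interrupt handler for the HPT channels: the per-channel
 * IDEDMA_CTL status is checked to see which channel interrupted;
 * stray interrupts are acknowledged by writing the status back.
 */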
3340 int
3341 hpt_pci_intr(arg)
3342 	void *arg;
3343 {
3344 	struct pciide_softc *sc = arg;
3345 	struct pciide_channel *cp;
3346 	struct channel_softc *wdc_cp;
3347 	int rv = 0;
3348 	int dmastat, i, crv;
3349 
3350 	for (i = 0; i < sc->sc_wdcdev.nchannels; i++) {
3351 		dmastat = bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh,
3352 		    IDEDMA_CTL + IDEDMA_SCH_OFFSET * i);
3353 		if ((dmastat & IDEDMA_CTL_INTR) == 0)
3354 			continue;
3355 		cp = &sc->pciide_channels[i];
3356 		wdc_cp = &cp->wdc_channel;
3357 		crv = wdcintr(wdc_cp);
3358 		if (crv == 0) {
3359 			printf("%s:%d: bogus intr\n",
3360 			    sc->sc_wdcdev.sc_dev.dv_xname, i);
3361 			bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
3362 			    IDEDMA_CTL + IDEDMA_SCH_OFFSET * i, dmastat);
3363 		} else
3364 			rv = 1;
3365 	}
3366 	return rv;
3367 }
3368 
3369 
3370 /* Macros to test product */
3371 #define PDC_IS_262(sc)							\
3372 	((sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA66 ||	\
3373 	(sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA100 ||	\
3374 	(sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA100X)
3375 #define PDC_IS_265(sc)							\
3376 	((sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA100 ||	\
3377 	(sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA100X)
3378 
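/*
 * Map the Promise PDC202xx family.  The chip may come up in RAID mode,
 * in which case the PCI class register can't be trusted, so the state
 * register is used to work out how the channels are configured.
 */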
3379 void
3380 pdc202xx_chip_map(sc, pa)
3381 	struct pciide_softc *sc;
3382 	struct pci_attach_args *pa;
3383 {
3384 	struct pciide_channel *cp;
3385 	int channel;
3386 	pcireg_t interface, st, mode;
3387 	bus_size_t cmdsize, ctlsize;
3388 
3389 	st = pci_conf_read(sc->sc_pc, sc->sc_tag, PDC2xx_STATE);
3390 	WDCDEBUG_PRINT(("pdc202xx_setup_chip: controller state 0x%x\n", st),
3391 	    DEBUG_PROBE);
3392 	if (pciide_chipen(sc, pa) == 0)
3393 		return;
3394 
3395 	/* Turn off RAID mode. */
3396 	st &= ~PDC2xx_STATE_IDERAID;
3397 
3398 	/*
3399 	 * We can't rely on the PCI_CLASS_REG content if the chip was in
3400 	 * RAID mode; we have to fake the interface bits.
3401 	 */
3402 	interface = PCIIDE_INTERFACE_SETTABLE(0) | PCIIDE_INTERFACE_SETTABLE(1);
3403 	if (st & PDC2xx_STATE_NATIVE)
3404 		interface |= PCIIDE_INTERFACE_PCI(0) | PCIIDE_INTERFACE_PCI(1);
3405 
3406 	printf("%s: bus-master DMA support present",
3407 	    sc->sc_wdcdev.sc_dev.dv_xname);
3408 	pciide_mapreg_dma(sc, pa);
3409 	printf("\n");
3410 	sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
3411 	    WDC_CAPABILITY_MODE;
3412 	if (sc->sc_dma_ok) {
3413 		sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_UDMA;
3414 		sc->sc_wdcdev.cap |= WDC_CAPABILITY_IRQACK;
3415 		sc->sc_wdcdev.irqack = pciide_irqack;
3416 	}
3417 	sc->sc_wdcdev.PIO_cap = 4;
3418 	sc->sc_wdcdev.DMA_cap = 2;
3419 	if (PDC_IS_265(sc))
3420 		sc->sc_wdcdev.UDMA_cap = 5;
3421 	else if (PDC_IS_262(sc))
3422 		sc->sc_wdcdev.UDMA_cap = 4;
3423 	else
3424 		sc->sc_wdcdev.UDMA_cap = 2;
3425 	sc->sc_wdcdev.set_modes = pdc202xx_setup_channel;
3426 	sc->sc_wdcdev.channels = sc->wdc_chanarray;
3427 	sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
3428 
3429 	/* setup failsafe defaults */
3430 	mode = 0;
3431 	mode = PDC2xx_TIM_SET_PA(mode, pdc2xx_pa[0]);
3432 	mode = PDC2xx_TIM_SET_PB(mode, pdc2xx_pb[0]);
3433 	mode = PDC2xx_TIM_SET_MB(mode, pdc2xx_dma_mb[0]);
3434 	mode = PDC2xx_TIM_SET_MC(mode, pdc2xx_dma_mc[0]);
3435 	for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
3436 		WDCDEBUG_PRINT(("pdc202xx_setup_chip: channel %d drive 0 "
3437 		    "initial timings  0x%x, now 0x%x\n", channel,
3438 		    pci_conf_read(sc->sc_pc, sc->sc_tag,
3439 		    PDC2xx_TIM(channel, 0)), mode | PDC2xx_TIM_IORDYp),
3440 		    DEBUG_PROBE);
3441 		pci_conf_write(sc->sc_pc, sc->sc_tag, PDC2xx_TIM(channel, 0),
3442 		    mode | PDC2xx_TIM_IORDYp);
3443 		WDCDEBUG_PRINT(("pdc202xx_setup_chip: channel %d drive 1 "
3444 		    "initial timings  0x%x, now 0x%x\n", channel,
3445 		    pci_conf_read(sc->sc_pc, sc->sc_tag,
3446 		    PDC2xx_TIM(channel, 1)), mode), DEBUG_PROBE);
3447 		pci_conf_write(sc->sc_pc, sc->sc_tag, PDC2xx_TIM(channel, 1),
3448 		    mode);
3449 	}
3450 
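	/* Program the System Control Register (SCR). */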
3451 	mode = PDC2xx_SCR_DMA;
3452 	if (PDC_IS_262(sc)) {
3453 		mode = PDC2xx_SCR_SET_GEN(mode, PDC262_SCR_GEN_LAT);
3454 	} else {
3455 		/* the BIOS set it up this way */
3456 		mode = PDC2xx_SCR_SET_GEN(mode, 0x1);
3457 	}
3458 	mode = PDC2xx_SCR_SET_I2C(mode, 0x3); /* ditto */
3459 	mode = PDC2xx_SCR_SET_POLL(mode, 0x1); /* ditto */
3460 	WDCDEBUG_PRINT(("pdc202xx_setup_chip: initial SCR  0x%x, now 0x%x\n",
3461 	    bus_space_read_4(sc->sc_dma_iot, sc->sc_dma_ioh, PDC2xx_SCR), mode),
3462 	    DEBUG_PROBE);
3463 	bus_space_write_4(sc->sc_dma_iot, sc->sc_dma_ioh, PDC2xx_SCR, mode);
3464 
3465 	/* controller initial state register is OK even without BIOS */
3466 	/* Set DMA mode to IDE DMA compatibility */
3467 	mode = bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh, PDC2xx_PM);
3468 	WDCDEBUG_PRINT(("pdc202xx_setup_chip: primary mode 0x%x", mode ),
3469 	    DEBUG_PROBE);
3470 	bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh, PDC2xx_PM,
3471 	    mode | 0x1);
3472 	mode = bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh, PDC2xx_SM);
3473 	WDCDEBUG_PRINT((", secondary mode 0x%x\n", mode ), DEBUG_PROBE);
3474 	bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh, PDC2xx_SM,
3475 	    mode | 0x1);
3476 
3477 	for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
3478 		cp = &sc->pciide_channels[channel];
3479 		if (pciide_chansetup(sc, channel, interface) == 0)
3480 			continue;
3481 		if ((st & (PDC_IS_262(sc) ?
3482 		    PDC262_STATE_EN(channel):PDC246_STATE_EN(channel))) == 0) {
3483 			printf("%s: %s channel ignored (disabled)\n",
3484 			    sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
3485 			continue;
3486 		}
3487 		if (PDC_IS_265(sc))
3488 			pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize,
3489 			    pdc20265_pci_intr);
3490 		else
3491 			pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize,
3492 			    pdc202xx_pci_intr);
3493 		if (cp->hw_ok == 0)
3494 			continue;
3495 		if (pciide_chan_candisable(cp))
3496 			st &= ~(PDC_IS_262(sc) ?
3497 			    PDC262_STATE_EN(channel):PDC246_STATE_EN(channel));
3498 		pciide_map_compat_intr(pa, cp, channel, interface);
3499 		pdc202xx_setup_channel(&cp->wdc_channel);
3500 	}
3501 	WDCDEBUG_PRINT(("pdc202xx_setup_chip: new controller state 0x%x\n", st),
3502 	    DEBUG_PROBE);
3503 	pci_conf_write(sc->sc_pc, sc->sc_tag, PDC2xx_STATE, st);
3504 	return;
3505 }
3506 
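/*
 * Per-channel mode setup for the PDC202xx: on the 20262 and later the
 * Ultra/66 clock and ATAPI register are handled first, then the
 * per-drive timing register is programmed.
 */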
3507 void
3508 pdc202xx_setup_channel(chp)
3509 	struct channel_softc *chp;
3510 {
3511 	struct ata_drive_datas *drvp;
3512 	int drive;
3513 	pcireg_t mode, st;
3514 	u_int32_t idedma_ctl, scr, atapi;
3515 	struct pciide_channel *cp = (struct pciide_channel*)chp;
3516 	struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
3517 	int channel = chp->channel;
3518 
3519 	/* setup DMA if needed */
3520 	pciide_channel_dma_setup(cp);
3521 
3522 	idedma_ctl = 0;
3523 	WDCDEBUG_PRINT(("pdc202xx_setup_channel %s: scr 0x%x\n",
3524 	    sc->sc_wdcdev.sc_dev.dv_xname,
3525 	    bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh, PDC262_U66)),
3526 	    DEBUG_PROBE);
3527 
3528 	/* Per channel settings */
3529 	if (PDC_IS_262(sc)) {
3530 		scr = bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh,
3531 		    PDC262_U66);
3532 		st = pci_conf_read(sc->sc_pc, sc->sc_tag, PDC2xx_STATE);
3533 		/* Trim UDMA mode */
3534 		if ((st & PDC262_STATE_80P(channel)) != 0 ||
3535 		    (chp->ch_drive[0].drive_flags & DRIVE_UDMA &&
3536 		    chp->ch_drive[0].UDMA_mode <= 2) ||
3537 		    (chp->ch_drive[1].drive_flags & DRIVE_UDMA &&
3538 		    chp->ch_drive[1].UDMA_mode <= 2)) {
3539 			if (chp->ch_drive[0].UDMA_mode > 2)
3540 				chp->ch_drive[0].UDMA_mode = 2;
3541 			if (chp->ch_drive[1].UDMA_mode > 2)
3542 				chp->ch_drive[1].UDMA_mode = 2;
3543 		}
3544 		/* Set U66 if needed */
3545 		if ((chp->ch_drive[0].drive_flags & DRIVE_UDMA &&
3546 		    chp->ch_drive[0].UDMA_mode > 2) ||
3547 		    (chp->ch_drive[1].drive_flags & DRIVE_UDMA &&
3548 		    chp->ch_drive[1].UDMA_mode > 2))
3549 			scr |= PDC262_U66_EN(channel);
3550 		else
3551 			scr &= ~PDC262_U66_EN(channel);
3552 		bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
3553 		    PDC262_U66, scr);
3554 		WDCDEBUG_PRINT(("pdc202xx_setup_channel %s:%d: ATAPI 0x%x\n",
3555 		    sc->sc_wdcdev.sc_dev.dv_xname, channel,
3556 		    bus_space_read_4(sc->sc_dma_iot, sc->sc_dma_ioh,
3557 		    PDC262_ATAPI(channel))), DEBUG_PROBE);
3558 		if (chp->ch_drive[0].drive_flags & DRIVE_ATAPI ||
3559 			chp->ch_drive[1].drive_flags & DRIVE_ATAPI) {
3560 			if (((chp->ch_drive[0].drive_flags & DRIVE_UDMA) &&
3561 			    !(chp->ch_drive[1].drive_flags & DRIVE_UDMA) &&
3562 			    (chp->ch_drive[1].drive_flags & DRIVE_DMA)) ||
3563 			    ((chp->ch_drive[1].drive_flags & DRIVE_UDMA) &&
3564 			    !(chp->ch_drive[0].drive_flags & DRIVE_UDMA) &&
3565 			    (chp->ch_drive[0].drive_flags & DRIVE_DMA)))
3566 				atapi = 0;
3567 			else
3568 				atapi = PDC262_ATAPI_UDMA;
3569 			bus_space_write_4(sc->sc_dma_iot, sc->sc_dma_ioh,
3570 			    PDC262_ATAPI(channel), atapi);
3571 		}
3572 	}
3573 	for (drive = 0; drive < 2; drive++) {
3574 		drvp = &chp->ch_drive[drive];
3575 		/* If no drive, skip */
3576 		if ((drvp->drive_flags & DRIVE) == 0)
3577 			continue;
3578 		mode = 0;
3579 		if (drvp->drive_flags & DRIVE_UDMA) {
3580 			/* use Ultra/DMA */
3581 			drvp->drive_flags &= ~DRIVE_DMA;
3582 			mode = PDC2xx_TIM_SET_MB(mode,
3583 			    pdc2xx_udma_mb[drvp->UDMA_mode]);
3584 			mode = PDC2xx_TIM_SET_MC(mode,
3585 			    pdc2xx_udma_mc[drvp->UDMA_mode]);
3587 			idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
3588 		} else if (drvp->drive_flags & DRIVE_DMA) {
3589 			mode = PDC2xx_TIM_SET_MB(mode,
3590 			    pdc2xx_dma_mb[drvp->DMA_mode]);
3591 			mode = PDC2xx_TIM_SET_MC(mode,
3592 			    pdc2xx_dma_mc[drvp->DMA_mode]);
3593 			idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
3594 		} else {
3595 			mode = PDC2xx_TIM_SET_MB(mode,
3596 			    pdc2xx_dma_mb[0]);
3597 			mode = PDC2xx_TIM_SET_MC(mode,
3598 			    pdc2xx_dma_mc[0]);
3599 		}
3600 		mode = PDC2xx_TIM_SET_PA(mode, pdc2xx_pa[drvp->PIO_mode]);
3601 		mode = PDC2xx_TIM_SET_PB(mode, pdc2xx_pb[drvp->PIO_mode]);
3602 		if (drvp->drive_flags & DRIVE_ATA)
3603 			mode |= PDC2xx_TIM_PRE;
3604 		mode |= PDC2xx_TIM_SYNC | PDC2xx_TIM_ERRDY;
3605 		if (drvp->PIO_mode >= 3) {
3606 			mode |= PDC2xx_TIM_IORDY;
3607 			if (drive == 0)
3608 				mode |= PDC2xx_TIM_IORDYp;
3609 		}
3610 		WDCDEBUG_PRINT(("pdc202xx_setup_channel: %s:%d:%d "
3611 		    "timings 0x%x\n",
3612 		    sc->sc_wdcdev.sc_dev.dv_xname,
3613 		    chp->channel, drive, mode), DEBUG_PROBE);
3614 		pci_conf_write(sc->sc_pc, sc->sc_tag,
3615 		    PDC2xx_TIM(chp->channel, drive), mode);
3616 	}
3617 	if (idedma_ctl != 0) {
3618 		/* Add software bits in status register */
3619 		bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
3620 		    IDEDMA_CTL, idedma_ctl);
3621 	}
3622 	pciide_print_modes(cp);
3623 }
3624 
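/*
 * Interrupt handler for the older PDC202xx chips: the per-channel
 * interrupt bits live in the System Control Register.
 */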
3625 int
3626 pdc202xx_pci_intr(arg)
3627 	void *arg;
3628 {
3629 	struct pciide_softc *sc = arg;
3630 	struct pciide_channel *cp;
3631 	struct channel_softc *wdc_cp;
3632 	int i, rv, crv;
3633 	u_int32_t scr;
3634 
3635 	rv = 0;
3636 	scr = bus_space_read_4(sc->sc_dma_iot, sc->sc_dma_ioh, PDC2xx_SCR);
3637 	for (i = 0; i < sc->sc_wdcdev.nchannels; i++) {
3638 		cp = &sc->pciide_channels[i];
3639 		wdc_cp = &cp->wdc_channel;
3640 		/* If a compat channel, skip. */
3641 		if (cp->compat)
3642 			continue;
3643 		if (scr & PDC2xx_SCR_INT(i)) {
3644 			crv = wdcintr(wdc_cp);
3645 			if (crv == 0)
3646 				printf("%s:%d: bogus intr (reg 0x%x)\n",
3647 				    sc->sc_wdcdev.sc_dev.dv_xname, i, scr);
3648 			else
3649 				rv = 1;
3650 		}
3651 	}
3652 	return rv;
3653 }
3654 
3655 int
3656 pdc20265_pci_intr(arg)
3657 	void *arg;
3658 {
3659 	struct pciide_softc *sc = arg;
3660 	struct pciide_channel *cp;
3661 	struct channel_softc *wdc_cp;
3662 	int i, rv, crv;
3663 	u_int32_t dmastat;
3664 
3665 	rv = 0;
3666 	for (i = 0; i < sc->sc_wdcdev.nchannels; i++) {
3667 		cp = &sc->pciide_channels[i];
3668 		wdc_cp = &cp->wdc_channel;
3669 		/* If a compat channel, skip. */
3670 		if (cp->compat)
3671 			continue;
3672 		/*
3673 		 * The Ultra/100 seems to assert PDC2xx_SCR_INT spuriously,
3674 		 * however it asserts INT in IDEDMA_CTL even for non-DMA ops.
3675 		 * So use it instead (requires 2 reg reads instead of 1,
3676 		 * but we can't do it another way).
3677 		 */
3678 		dmastat = bus_space_read_1(sc->sc_dma_iot,
3679 		    sc->sc_dma_ioh, IDEDMA_CTL + IDEDMA_SCH_OFFSET * i);
3680 		if ((dmastat & IDEDMA_CTL_INTR) == 0)
3681 			continue;
3682 		crv = wdcintr(wdc_cp);
3683 		if (crv == 0)
3684 			printf("%s:%d: bogus intr\n",
3685 			    sc->sc_wdcdev.sc_dev.dv_xname, i);
3686 		else
3687 			rv = 1;
3688 	}
3689 	return rv;
3690 }
3691 
3692 void
3693 opti_chip_map(sc, pa)
3694 	struct pciide_softc *sc;
3695 	struct pci_attach_args *pa;
3696 {
3697 	struct pciide_channel *cp;
3698 	bus_size_t cmdsize, ctlsize;
3699 	pcireg_t interface;
3700 	u_int8_t init_ctrl;
3701 	int channel;
3702 
3703 	if (pciide_chipen(sc, pa) == 0)
3704 		return;
3705 	printf("%s: bus-master DMA support present",
3706 	    sc->sc_wdcdev.sc_dev.dv_xname);
3707 
3708 	/*
3709 	 * XXXSCW:
3710 	 * There seem to be a couple of buggy revisions/implementations
3711 	 * of the OPTi pciide chipset. This kludge seems to fix one of
3712 	 * the reported problems (PR/11644) but still fails for the
3713 	 * other (PR/13151), although the latter may be due to other
3714 	 * issues too...
3715 	 */
3716 	if (PCI_REVISION(pa->pa_class) <= 0x12) {
3717 		printf(" but disabled due to chip rev. <= 0x12");
3718 		sc->sc_dma_ok = 0;
3719 		sc->sc_wdcdev.cap = 0;
3720 	} else {
3721 		sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA32;
3722 		pciide_mapreg_dma(sc, pa);
3723 	}
3724 	printf("\n");
3725 
3726 	sc->sc_wdcdev.cap |= WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_MODE;
3727 	sc->sc_wdcdev.PIO_cap = 4;
3728 	if (sc->sc_dma_ok) {
3729 		sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_IRQACK;
3730 		sc->sc_wdcdev.irqack = pciide_irqack;
3731 		sc->sc_wdcdev.DMA_cap = 2;
3732 	}
3733 	sc->sc_wdcdev.set_modes = opti_setup_channel;
3734 
3735 	sc->sc_wdcdev.channels = sc->wdc_chanarray;
3736 	sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
3737 
3738 	init_ctrl = pciide_pci_read(sc->sc_pc, sc->sc_tag,
3739 	    OPTI_REG_INIT_CONTROL);
3740 
3741 	interface = PCI_INTERFACE(pa->pa_class);
3742 
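	/*
	 * Attach the channels.  The second channel may be disabled via the
	 * initialization control register.
	 */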
3743 	for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
3744 		cp = &sc->pciide_channels[channel];
3745 		if (pciide_chansetup(sc, channel, interface) == 0)
3746 			continue;
3747 		if (channel == 1 &&
3748 		    (init_ctrl & OPTI_INIT_CONTROL_CH2_DISABLE) != 0) {
3749 			printf("%s: %s channel ignored (disabled)\n",
3750 			    sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
3751 			continue;
3752 		}
3753 		pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize,
3754 		    pciide_pci_intr);
3755 		if (cp->hw_ok == 0)
3756 			continue;
3757 		pciide_map_compat_intr(pa, cp, channel, interface);
3758 		if (cp->hw_ok == 0)
3759 			continue;
3760 		opti_setup_channel(&cp->wdc_channel);
3761 	}
3762 }
3763 
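/*
 * Per-channel mode setup for the OPTi chips: both drives must share the
 * same `Address Setup Time', so the faster drive may be slowed down,
 * then the cycle timing and enhanced mode registers are programmed.
 */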
3764 void
3765 opti_setup_channel(chp)
3766 	struct channel_softc *chp;
3767 {
3768 	struct ata_drive_datas *drvp;
3769 	struct pciide_channel *cp = (struct pciide_channel*)chp;
3770 	struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
3771 	int drive, spd;
3772 	int mode[2];
3773 	u_int8_t rv, mr;
3774 
3775 	/*
3776 	 * The `Delay' and `Address Setup Time' fields of the
3777 	 * Miscellaneous Register are always zero initially.
3778 	 */
3779 	mr = opti_read_config(chp, OPTI_REG_MISC) &
3780 		~(OPTI_MISC_DELAY_MASK |
3781 		OPTI_MISC_ADDR_SETUP_MASK |
3782 		OPTI_MISC_INDEX_MASK);
3783 
3784 	/* Prime the control register before setting timing values */
3785 	opti_write_config(chp, OPTI_REG_CONTROL, OPTI_CONTROL_DISABLE);
3786 
3787 	/* Determine the clock rate of the PCI bus the chip is attached to */
3788 	spd = (int) opti_read_config(chp, OPTI_REG_STRAP);
3789 	spd &= OPTI_STRAP_PCI_SPEED_MASK;
3790 
3791 	/* setup DMA if needed */
3792 	pciide_channel_dma_setup(cp);
3793 
3794 	for (drive = 0; drive < 2; drive++) {
3795 		drvp = &chp->ch_drive[drive];
3796 		/* If no drive, skip */
3797 		if ((drvp->drive_flags & DRIVE) == 0) {
3798 			mode[drive] = -1;
3799 			continue;
3800 		}
3801 
3802 		if ((drvp->drive_flags & DRIVE_DMA)) {
3803 			/*
3804 			 * Timings will be used for both PIO and DMA,
3805 			 * so adjust DMA mode if needed
3806 			 */
3807 			if (drvp->PIO_mode > (drvp->DMA_mode + 2))
3808 				drvp->PIO_mode = drvp->DMA_mode + 2;
3809 			if (drvp->DMA_mode + 2 > (drvp->PIO_mode))
3810 				drvp->DMA_mode = (drvp->PIO_mode > 2) ?
3811 				    drvp->PIO_mode - 2 : 0;
3812 			if (drvp->DMA_mode == 0)
3813 				drvp->PIO_mode = 0;
3814 
3815 			mode[drive] = drvp->DMA_mode + 5;
3816 		} else
3817 			mode[drive] = drvp->PIO_mode;
3818 
3819 		if (drive && mode[0] >= 0 &&
3820 		    (opti_tim_as[spd][mode[0]] != opti_tim_as[spd][mode[1]])) {
3821 			/*
3822 			 * Can't have two drives using different values
3823 			 * for `Address Setup Time'.
3824 			 * Slow down the faster drive to compensate.
3825 			 */
3826 			int d = (opti_tim_as[spd][mode[0]] >
3827 				 opti_tim_as[spd][mode[1]]) ?  0 : 1;
3828 
3829 			mode[d] = mode[1-d];
3830 			chp->ch_drive[d].PIO_mode = chp->ch_drive[1-d].PIO_mode;
3831 			chp->ch_drive[d].DMA_mode = 0;
3832 			chp->ch_drive[d].drive_flags &= ~DRIVE_DMA;
3833 		}
3834 	}
3835 
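	/* Now program the timing registers with the modes computed above. */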
3836 	for (drive = 0; drive < 2; drive++) {
3837 		int m;
3838 		if ((m = mode[drive]) < 0)
3839 			continue;
3840 
3841 		/* Set the Address Setup Time and select appropriate index */
3842 		rv = opti_tim_as[spd][m] << OPTI_MISC_ADDR_SETUP_SHIFT;
3843 		rv |= OPTI_MISC_INDEX(drive);
3844 		opti_write_config(chp, OPTI_REG_MISC, mr | rv);
3845 
3846 		/* Set the pulse width and recovery timing parameters */
3847 		rv  = opti_tim_cp[spd][m] << OPTI_PULSE_WIDTH_SHIFT;
3848 		rv |= opti_tim_rt[spd][m] << OPTI_RECOVERY_TIME_SHIFT;
3849 		opti_write_config(chp, OPTI_REG_READ_CYCLE_TIMING, rv);
3850 		opti_write_config(chp, OPTI_REG_WRITE_CYCLE_TIMING, rv);
3851 
3852 		/* Set the Enhanced Mode register appropriately */
3853 		rv = pciide_pci_read(sc->sc_pc, sc->sc_tag, OPTI_REG_ENH_MODE);
3854 		rv &= ~OPTI_ENH_MODE_MASK(chp->channel, drive);
3855 		rv |= OPTI_ENH_MODE(chp->channel, drive, opti_tim_em[m]);
3856 		pciide_pci_write(sc->sc_pc, sc->sc_tag, OPTI_REG_ENH_MODE, rv);
3857 	}
3858 
3859 	/* Finally, enable the timings */
3860 	opti_write_config(chp, OPTI_REG_CONTROL, OPTI_CONTROL_ENABLE);
3861 
3862 	pciide_print_modes(cp);
3863 }
3864 
3865 #define	ACARD_IS_850(sc)						\
3866 	((sc)->sc_pp->ide_product == PCI_PRODUCT_ACARD_ATP850U)
3867 
3868 void
3869 acard_chip_map(sc, pa)
3870 	struct pciide_softc *sc;
3871 	struct pci_attach_args *pa;
3872 {
3873 	struct pciide_channel *cp;
3874 	int i;
3875 	pcireg_t interface;
3876 	bus_size_t cmdsize, ctlsize;
3877 
3878 	if (pciide_chipen(sc, pa) == 0)
3879 		return;
3880 
3881 	/*
3882 	 * When the chip is in native mode it identifies itself as a
3883 	 * 'misc mass storage' device, so the interface bits must be faked.
3884 	 */
3885 	if (PCI_SUBCLASS(pa->pa_class) == PCI_SUBCLASS_MASS_STORAGE_IDE) {
3886 		interface = PCI_INTERFACE(pa->pa_class);
3887 	} else {
3888 		interface = PCIIDE_INTERFACE_BUS_MASTER_DMA |
3889 		    PCIIDE_INTERFACE_PCI(0) | PCIIDE_INTERFACE_PCI(1);
3890 	}
3891 
3892 	printf("%s: bus-master DMA support present",
3893 	    sc->sc_wdcdev.sc_dev.dv_xname);
3894 	pciide_mapreg_dma(sc, pa);
3895 	printf("\n");
3896 	sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
3897 	    WDC_CAPABILITY_MODE;
3898 
3899 	if (sc->sc_dma_ok) {
3900 		sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_UDMA;
3901 		sc->sc_wdcdev.cap |= WDC_CAPABILITY_IRQACK;
3902 		sc->sc_wdcdev.irqack = pciide_irqack;
3903 	}
3904 	sc->sc_wdcdev.PIO_cap = 4;
3905 	sc->sc_wdcdev.DMA_cap = 2;
3906 	sc->sc_wdcdev.UDMA_cap = ACARD_IS_850(sc) ? 2 : 4;
3907 
3908 	sc->sc_wdcdev.set_modes = acard_setup_channel;
3909 	sc->sc_wdcdev.channels = sc->wdc_chanarray;
3910 	sc->sc_wdcdev.nchannels = 2;
3911 
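	/* Map and attach both channels. */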
3912 	for (i = 0; i < sc->sc_wdcdev.nchannels; i++) {
3913 		cp = &sc->pciide_channels[i];
3914 		if (pciide_chansetup(sc, i, interface) == 0)
3915 			continue;
3916 		if (interface & PCIIDE_INTERFACE_PCI(i)) {
3917 			cp->hw_ok = pciide_mapregs_native(pa, cp, &cmdsize,
3918 			    &ctlsize, pciide_pci_intr);
3919 		} else {
3920 			cp->hw_ok = pciide_mapregs_compat(pa, cp, i,
3921 			    &cmdsize, &ctlsize);
3922 		}
3923 		if (cp->hw_ok == 0)
3924 			return;
3925 		cp->wdc_channel.data32iot = cp->wdc_channel.cmd_iot;
3926 		cp->wdc_channel.data32ioh = cp->wdc_channel.cmd_ioh;
3927 		wdcattach(&cp->wdc_channel);
3928 		acard_setup_channel(&cp->wdc_channel);
3929 	}
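	/*
	 * Chips other than the ATP850 have an interrupt control bit in
	 * ATP8x0_CTRL; make sure it is clear.
	 */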
3930 	if (!ACARD_IS_850(sc)) {
3931 		u_int32_t reg;
3932 		reg = pci_conf_read(sc->sc_pc, sc->sc_tag, ATP8x0_CTRL);
3933 		reg &= ~ATP860_CTRL_INT;
3934 		pci_conf_write(sc->sc_pc, sc->sc_tag, ATP8x0_CTRL, reg);
3935 	}
3936 }
3937 
3938 void
3939 acard_setup_channel(chp)
3940 	struct channel_softc *chp;
3941 {
3942 	struct ata_drive_datas *drvp;
3943 	struct pciide_channel *cp = (struct pciide_channel*)chp;
3944 	struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
3945 	int channel = chp->channel;
3946 	int drive;
3947 	u_int32_t idetime, udma_mode;
3948 	u_int32_t idedma_ctl;
3949 
3950 	/* setup DMA if needed */
3951 	pciide_channel_dma_setup(cp);
3952 
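	/*
	 * Fetch the timing/UDMA registers; their layout differs between the
	 * ATP850 and the later chips.
	 */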
3953 	if (ACARD_IS_850(sc)) {
3954 		idetime = 0;
3955 		udma_mode = pci_conf_read(sc->sc_pc, sc->sc_tag, ATP850_UDMA);
3956 		udma_mode &= ~ATP850_UDMA_MASK(channel);
3957 	} else {
3958 		idetime = pci_conf_read(sc->sc_pc, sc->sc_tag, ATP860_IDETIME);
3959 		idetime &= ~ATP860_SETTIME_MASK(channel);
3960 		udma_mode = pci_conf_read(sc->sc_pc, sc->sc_tag, ATP860_UDMA);
3961 		udma_mode &= ~ATP860_UDMA_MASK(channel);
3962 
3963 		/* check for 80-pin cable */
3964 		if ((chp->ch_drive[0].drive_flags & DRIVE_UDMA) ||
3965 		    (chp->ch_drive[1].drive_flags & DRIVE_UDMA)) {
3966 			if (pci_conf_read(sc->sc_pc, sc->sc_tag, ATP8x0_CTRL)
3967 			    & ATP860_CTRL_80P(chp->channel)) {
3968 				if (chp->ch_drive[0].UDMA_mode > 2)
3969 					chp->ch_drive[0].UDMA_mode = 2;
3970 				if (chp->ch_drive[1].UDMA_mode > 2)
3971 					chp->ch_drive[1].UDMA_mode = 2;
3972 			}
3973 		}
3974 	}
3975 
3976 	idedma_ctl = 0;
3977 
3978 	/* Per drive settings */
3979 	for (drive = 0; drive < 2; drive++) {
3980 		drvp = &chp->ch_drive[drive];
3981 		/* If no drive, skip */
3982 		if ((drvp->drive_flags & DRIVE) == 0)
3983 			continue;
3984 		/* add timing values, setup DMA if needed */
3985 		if ((chp->wdc->cap & WDC_CAPABILITY_UDMA) &&
3986 		    (drvp->drive_flags & DRIVE_UDMA)) {
3987 			/* use Ultra/DMA */
3988 			if (ACARD_IS_850(sc)) {
3989 				idetime |= ATP850_SETTIME(drive,
3990 				    acard_act_udma[drvp->UDMA_mode],
3991 				    acard_rec_udma[drvp->UDMA_mode]);
3992 				udma_mode |= ATP850_UDMA_MODE(channel, drive,
3993 				    acard_udma_conf[drvp->UDMA_mode]);
3994 			} else {
3995 				idetime |= ATP860_SETTIME(channel, drive,
3996 				    acard_act_udma[drvp->UDMA_mode],
3997 				    acard_rec_udma[drvp->UDMA_mode]);
3998 				udma_mode |= ATP860_UDMA_MODE(channel, drive,
3999 				    acard_udma_conf[drvp->UDMA_mode]);
4000 			}
4001 			idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
4002 		} else if ((chp->wdc->cap & WDC_CAPABILITY_DMA) &&
4003 		    (drvp->drive_flags & DRIVE_DMA)) {
4004 			/* use Multiword DMA */
4005 			drvp->drive_flags &= ~DRIVE_UDMA;
4006 			if (ACARD_IS_850(sc)) {
4007 				idetime |= ATP850_SETTIME(drive,
4008 				    acard_act_dma[drvp->DMA_mode],
4009 				    acard_rec_dma[drvp->DMA_mode]);
4010 			} else {
4011 				idetime |= ATP860_SETTIME(channel, drive,
4012 				    acard_act_dma[drvp->DMA_mode],
4013 				    acard_rec_dma[drvp->DMA_mode]);
4014 			}
4015 			idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
4016 		} else {
4017 			/* PIO only */
4018 			drvp->drive_flags &= ~(DRIVE_UDMA | DRIVE_DMA);
4019 			if (ACARD_IS_850(sc)) {
4020 				idetime |= ATP850_SETTIME(drive,
4021 				    acard_act_pio[drvp->PIO_mode],
4022 				    acard_rec_pio[drvp->PIO_mode]);
4023 			} else {
4024 				idetime |= ATP860_SETTIME(channel, drive,
4025 				    acard_act_pio[drvp->PIO_mode],
4026 				    acard_rec_pio[drvp->PIO_mode]);
4027 			}
4028 			pci_conf_write(sc->sc_pc, sc->sc_tag, ATP8x0_CTRL,
4029 			    pci_conf_read(sc->sc_pc, sc->sc_tag, ATP8x0_CTRL)
4030 			    | ATP8x0_CTRL_EN(channel));
4031 		}
4032 	}
4033 
4034 	if (idedma_ctl != 0) {
4035 		/* Add software bits in status register */
4036 		bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
4037 		    IDEDMA_CTL + IDEDMA_SCH_OFFSET * channel, idedma_ctl);
4038 	}
4039 	pciide_print_modes(cp);
4040 
4041 	if (ACARD_IS_850(sc)) {
4042 		pci_conf_write(sc->sc_pc, sc->sc_tag,
4043 		    ATP850_IDETIME(channel), idetime);
4044 		pci_conf_write(sc->sc_pc, sc->sc_tag, ATP850_UDMA, udma_mode);
4045 	} else {
4046 		pci_conf_write(sc->sc_pc, sc->sc_tag, ATP860_IDETIME, idetime);
4047 		pci_conf_write(sc->sc_pc, sc->sc_tag, ATP860_UDMA, udma_mode);
4048 	}
4049 }
4050 
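/*
 * Interrupt handler for the ACARD chips: check each channel's IDEDMA_CTL
 * for the interrupt bit; interrupts that arrive while no command is
 * outstanding are acknowledged and otherwise ignored.
 */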
4051 int
4052 acard_pci_intr(arg)
4053 	void *arg;
4054 {
4055 	struct pciide_softc *sc = arg;
4056 	struct pciide_channel *cp;
4057 	struct channel_softc *wdc_cp;
4058 	int rv = 0;
4059 	int dmastat, i, crv;
4060 
4061 	for (i = 0; i < sc->sc_wdcdev.nchannels; i++) {
4062 		dmastat = bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh,
4063 		    IDEDMA_CTL + IDEDMA_SCH_OFFSET * i);
4064 		if ((dmastat & IDEDMA_CTL_INTR) == 0)
4065 			continue;
4066 		cp = &sc->pciide_channels[i];
4067 		wdc_cp = &cp->wdc_channel;
4068 		if ((wdc_cp->ch_flags & WDCF_IRQ_WAIT) == 0) {
4069 			(void)wdcintr(wdc_cp);
4070 			bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
4071 			    IDEDMA_CTL + IDEDMA_SCH_OFFSET * i, dmastat);
4072 			continue;
4073 		}
4074 		crv = wdcintr(wdc_cp);
4075 		if (crv == 0)
4076 			printf("%s:%d: bogus intr\n",
4077 			    sc->sc_wdcdev.sc_dev.dv_xname, i);
4078 		else if (crv == 1)
4079 			rv = 1;
4080 		else if (rv == 0)
4081 			rv = crv;
4082 	}
4083 	return rv;
4084 }
4085