xref: /netbsd-src/sys/dev/pci/pciide.c (revision 06be8101a16cc95f40783b3cb7afd12112103a9a)
1 /*	$NetBSD: pciide.c,v 1.134 2001/11/13 07:48:48 lukem Exp $	*/
2 
3 
4 /*
5  * Copyright (c) 1999, 2000, 2001 Manuel Bouyer.
6  *
7  * Redistribution and use in source and binary forms, with or without
8  * modification, are permitted provided that the following conditions
9  * are met:
10  * 1. Redistributions of source code must retain the above copyright
11  *    notice, this list of conditions and the following disclaimer.
12  * 2. Redistributions in binary form must reproduce the above copyright
13  *    notice, this list of conditions and the following disclaimer in the
14  *    documentation and/or other materials provided with the distribution.
15  * 3. All advertising materials mentioning features or use of this software
16  *    must display the following acknowledgement:
17  *	This product includes software developed by the University of
18  *	California, Berkeley and its contributors.
19  * 4. Neither the name of the University nor the names of its contributors
20  *    may be used to endorse or promote products derived from this software
21  *    without specific prior written permission.
22  *
23  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
24  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
25  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
26  * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
27  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
28  * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
29  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
30  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
31  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
32  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
33  *
34  */
35 
36 
37 /*
38  * Copyright (c) 1996, 1998 Christopher G. Demetriou.  All rights reserved.
39  *
40  * Redistribution and use in source and binary forms, with or without
41  * modification, are permitted provided that the following conditions
42  * are met:
43  * 1. Redistributions of source code must retain the above copyright
44  *    notice, this list of conditions and the following disclaimer.
45  * 2. Redistributions in binary form must reproduce the above copyright
46  *    notice, this list of conditions and the following disclaimer in the
47  *    documentation and/or other materials provided with the distribution.
48  * 3. All advertising materials mentioning features or use of this software
49  *    must display the following acknowledgement:
50  *      This product includes software developed by Christopher G. Demetriou
51  *	for the NetBSD Project.
52  * 4. The name of the author may not be used to endorse or promote products
53  *    derived from this software without specific prior written permission
54  *
55  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
56  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
57  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
58  * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
59  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
60  * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
61  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
62  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
63  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
64  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
65  */
66 
67 /*
68  * PCI IDE controller driver.
69  *
70  * Author: Christopher G. Demetriou, March 2, 1998 (derived from NetBSD
71  * sys/dev/pci/ppb.c, revision 1.16).
72  *
73  * See "PCI IDE Controller Specification, Revision 1.0 3/4/94" and
74  * "Programming Interface for Bus Master IDE Controller, Revision 1.0
75  * 5/16/94" from the PCI SIG.
76  *
77  */
78 
79 #include <sys/cdefs.h>
80 __KERNEL_RCSID(0, "$NetBSD: pciide.c,v 1.134 2001/11/13 07:48:48 lukem Exp $");
81 
82 #ifndef WDCDEBUG
83 #define WDCDEBUG
84 #endif
85 
86 #define DEBUG_DMA   0x01
87 #define DEBUG_XFERS  0x02
88 #define DEBUG_FUNCS  0x08
89 #define DEBUG_PROBE  0x10
90 #ifdef WDCDEBUG
91 int wdcdebug_pciide_mask = 0;
92 #define WDCDEBUG_PRINT(args, level) \
93 	if (wdcdebug_pciide_mask & (level)) printf args
94 #else
95 #define WDCDEBUG_PRINT(args, level)
96 #endif
97 #include <sys/param.h>
98 #include <sys/systm.h>
99 #include <sys/device.h>
100 #include <sys/malloc.h>
101 
102 #include <uvm/uvm_extern.h>
103 
104 #include <machine/endian.h>
105 
106 #include <dev/pci/pcireg.h>
107 #include <dev/pci/pcivar.h>
108 #include <dev/pci/pcidevs.h>
109 #include <dev/pci/pciidereg.h>
110 #include <dev/pci/pciidevar.h>
111 #include <dev/pci/pciide_piix_reg.h>
112 #include <dev/pci/pciide_amd_reg.h>
113 #include <dev/pci/pciide_apollo_reg.h>
114 #include <dev/pci/pciide_cmd_reg.h>
115 #include <dev/pci/pciide_cy693_reg.h>
116 #include <dev/pci/pciide_sis_reg.h>
117 #include <dev/pci/pciide_acer_reg.h>
118 #include <dev/pci/pciide_pdc202xx_reg.h>
119 #include <dev/pci/pciide_opti_reg.h>
120 #include <dev/pci/pciide_hpt_reg.h>
121 #include <dev/pci/pciide_acard_reg.h>
122 #include <dev/pci/cy82c693var.h>
123 
124 #include "opt_pciide.h"
125 
126 /* inlines for reading/writing 8-bit PCI registers */
127 static __inline u_int8_t pciide_pci_read __P((pci_chipset_tag_t, pcitag_t,
128 					      int));
129 static __inline void pciide_pci_write __P((pci_chipset_tag_t, pcitag_t,
130 					   int, u_int8_t));
131 
132 static __inline u_int8_t
133 pciide_pci_read(pc, pa, reg)
134 	pci_chipset_tag_t pc;
135 	pcitag_t pa;
136 	int reg;
137 {
138 
139 	return (pci_conf_read(pc, pa, (reg & ~0x03)) >>
140 	    ((reg & 0x03) * 8) & 0xff);
141 }
142 
143 static __inline void
144 pciide_pci_write(pc, pa, reg, val)
145 	pci_chipset_tag_t pc;
146 	pcitag_t pa;
147 	int reg;
148 	u_int8_t val;
149 {
150 	pcireg_t pcival;
151 
152 	pcival = pci_conf_read(pc, pa, (reg & ~0x03));
153 	pcival &= ~(0xff << ((reg & 0x03) * 8));
154 	pcival |= (val << ((reg & 0x03) * 8));
155 	pci_conf_write(pc, pa, (reg & ~0x03), pcival);
156 }
157 
158 void default_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
159 
160 void piix_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
161 void piix_setup_channel __P((struct channel_softc*));
162 void piix3_4_setup_channel __P((struct channel_softc*));
163 static u_int32_t piix_setup_idetim_timings __P((u_int8_t, u_int8_t, u_int8_t));
164 static u_int32_t piix_setup_idetim_drvs __P((struct ata_drive_datas*));
165 static u_int32_t piix_setup_sidetim_timings __P((u_int8_t, u_int8_t, u_int8_t));
166 
167 void amd7x6_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
168 void amd7x6_setup_channel __P((struct channel_softc*));
169 
170 void apollo_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
171 void apollo_setup_channel __P((struct channel_softc*));
172 
173 void cmd_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
174 void cmd0643_9_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
175 void cmd0643_9_setup_channel __P((struct channel_softc*));
176 void cmd_channel_map __P((struct pci_attach_args *,
177 			struct pciide_softc *, int));
178 int  cmd_pci_intr __P((void *));
179 void cmd646_9_irqack __P((struct channel_softc *));
180 
181 void cy693_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
182 void cy693_setup_channel __P((struct channel_softc*));
183 
184 void sis_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
185 void sis_setup_channel __P((struct channel_softc*));
186 static int sis_hostbr_match __P(( struct pci_attach_args *));
187 
188 void acer_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
189 void acer_setup_channel __P((struct channel_softc*));
190 int  acer_pci_intr __P((void *));
191 static int acer_isabr_match __P(( struct pci_attach_args *));
192 
193 void pdc202xx_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
194 void pdc202xx_setup_channel __P((struct channel_softc*));
195 int  pdc202xx_pci_intr __P((void *));
196 int  pdc20265_pci_intr __P((void *));
197 
198 void opti_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
199 void opti_setup_channel __P((struct channel_softc*));
200 
201 void hpt_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
202 void hpt_setup_channel __P((struct channel_softc*));
203 int  hpt_pci_intr __P((void *));
204 
205 void acard_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
206 void acard_setup_channel __P((struct channel_softc*));
207 int  acard_pci_intr __P((void *));
208 
209 #ifdef PCIIDE_WINBOND_ENABLE
210 void winbond_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
211 #endif
212 
213 void pciide_channel_dma_setup __P((struct pciide_channel *));
214 int  pciide_dma_table_setup __P((struct pciide_softc*, int, int));
215 int  pciide_dma_init __P((void*, int, int, void *, size_t, int));
216 void pciide_dma_start __P((void*, int, int));
217 int  pciide_dma_finish __P((void*, int, int, int));
218 void pciide_irqack __P((struct channel_softc *));
219 void pciide_print_modes __P((struct pciide_channel *));
220 
221 struct pciide_product_desc {
222 	u_int32_t ide_product;
223 	int ide_flags;
224 	const char *ide_name;
225 	/* map and setup chip, probe drives */
226 	void (*chip_map) __P((struct pciide_softc*, struct pci_attach_args*));
227 };
228 
229 /* Flags for ide_flags */
230 #define IDE_PCI_CLASS_OVERRIDE	0x0001 /* accept even if class != pciide */
231 #define	IDE_16BIT_IOSPACE	0x0002 /* I/O space BARS ignore upper word */
232 
233 /* Default product description for devices not known from this controller */
234 const struct pciide_product_desc default_product_desc = {
235 	0,
236 	0,
237 	"Generic PCI IDE controller",
238 	default_chip_map,
239 };
240 
241 const struct pciide_product_desc pciide_intel_products[] =  {
242 	{ PCI_PRODUCT_INTEL_82092AA,
243 	  0,
244 	  "Intel 82092AA IDE controller",
245 	  default_chip_map,
246 	},
247 	{ PCI_PRODUCT_INTEL_82371FB_IDE,
248 	  0,
249 	  "Intel 82371FB IDE controller (PIIX)",
250 	  piix_chip_map,
251 	},
252 	{ PCI_PRODUCT_INTEL_82371SB_IDE,
253 	  0,
254 	  "Intel 82371SB IDE Interface (PIIX3)",
255 	  piix_chip_map,
256 	},
257 	{ PCI_PRODUCT_INTEL_82371AB_IDE,
258 	  0,
259 	  "Intel 82371AB IDE controller (PIIX4)",
260 	  piix_chip_map,
261 	},
262 	{ PCI_PRODUCT_INTEL_82440MX_IDE,
263 	  0,
264 	  "Intel 82440MX IDE controller",
265 	  piix_chip_map
266 	},
267 	{ PCI_PRODUCT_INTEL_82801AA_IDE,
268 	  0,
269 	  "Intel 82801AA IDE Controller (ICH)",
270 	  piix_chip_map,
271 	},
272 	{ PCI_PRODUCT_INTEL_82801AB_IDE,
273 	  0,
274 	  "Intel 82801AB IDE Controller (ICH0)",
275 	  piix_chip_map,
276 	},
277 	{ PCI_PRODUCT_INTEL_82801BA_IDE,
278 	  0,
279 	  "Intel 82801BA IDE Controller (ICH2)",
280 	  piix_chip_map,
281 	},
282 	{ PCI_PRODUCT_INTEL_82801BAM_IDE,
283 	  0,
284 	  "Intel 82801BAM IDE Controller (ICH2)",
285 	  piix_chip_map,
286 	},
287 	{ 0,
288 	  0,
289 	  NULL,
290 	  NULL
291 	}
292 };
293 
294 const struct pciide_product_desc pciide_amd_products[] =  {
295 	{ PCI_PRODUCT_AMD_PBC756_IDE,
296 	  0,
297 	  "Advanced Micro Devices AMD756 IDE Controller",
298 	  amd7x6_chip_map
299 	},
300 	{ PCI_PRODUCT_AMD_PBC766_IDE,
301 	  0,
302 	  "Advanced Micro Devices AMD766 IDE Controller",
303 	  amd7x6_chip_map
304 	},
305 	{ 0,
306 	  0,
307 	  NULL,
308 	  NULL
309 	}
310 };
311 
312 const struct pciide_product_desc pciide_cmd_products[] =  {
313 	{ PCI_PRODUCT_CMDTECH_640,
314 	  0,
315 	  "CMD Technology PCI0640",
316 	  cmd_chip_map
317 	},
318 	{ PCI_PRODUCT_CMDTECH_643,
319 	  0,
320 	  "CMD Technology PCI0643",
321 	  cmd0643_9_chip_map,
322 	},
323 	{ PCI_PRODUCT_CMDTECH_646,
324 	  0,
325 	  "CMD Technology PCI0646",
326 	  cmd0643_9_chip_map,
327 	},
328 	{ PCI_PRODUCT_CMDTECH_648,
329 	  IDE_PCI_CLASS_OVERRIDE,
330 	  "CMD Technology PCI0648",
331 	  cmd0643_9_chip_map,
332 	},
333 	{ PCI_PRODUCT_CMDTECH_649,
334 	  IDE_PCI_CLASS_OVERRIDE,
335 	  "CMD Technology PCI0649",
336 	  cmd0643_9_chip_map,
337 	},
338 	{ 0,
339 	  0,
340 	  NULL,
341 	  NULL
342 	}
343 };
344 
345 const struct pciide_product_desc pciide_via_products[] =  {
346 	{ PCI_PRODUCT_VIATECH_VT82C586_IDE,
347 	  0,
348 	  NULL,
349 	  apollo_chip_map,
350 	 },
351 	{ PCI_PRODUCT_VIATECH_VT82C586A_IDE,
352 	  0,
353 	  NULL,
354 	  apollo_chip_map,
355 	},
356 	{ 0,
357 	  0,
358 	  NULL,
359 	  NULL
360 	}
361 };
362 
363 const struct pciide_product_desc pciide_cypress_products[] =  {
364 	{ PCI_PRODUCT_CONTAQ_82C693,
365 	  IDE_16BIT_IOSPACE,
366 	  "Cypress 82C693 IDE Controller",
367 	  cy693_chip_map,
368 	},
369 	{ 0,
370 	  0,
371 	  NULL,
372 	  NULL
373 	}
374 };
375 
376 const struct pciide_product_desc pciide_sis_products[] =  {
377 	{ PCI_PRODUCT_SIS_5597_IDE,
378 	  0,
379 	  "Silicon Integrated System 5597/5598 IDE controller",
380 	  sis_chip_map,
381 	},
382 	{ 0,
383 	  0,
384 	  NULL,
385 	  NULL
386 	}
387 };
388 
389 const struct pciide_product_desc pciide_acer_products[] =  {
390 	{ PCI_PRODUCT_ALI_M5229,
391 	  0,
392 	  "Acer Labs M5229 UDMA IDE Controller",
393 	  acer_chip_map,
394 	},
395 	{ 0,
396 	  0,
397 	  NULL,
398 	  NULL
399 	}
400 };
401 
402 const struct pciide_product_desc pciide_promise_products[] =  {
403 	{ PCI_PRODUCT_PROMISE_ULTRA33,
404 	  IDE_PCI_CLASS_OVERRIDE,
405 	  "Promise Ultra33/ATA Bus Master IDE Accelerator",
406 	  pdc202xx_chip_map,
407 	},
408 	{ PCI_PRODUCT_PROMISE_ULTRA66,
409 	  IDE_PCI_CLASS_OVERRIDE,
410 	  "Promise Ultra66/ATA Bus Master IDE Accelerator",
411 	  pdc202xx_chip_map,
412 	},
413 	{ PCI_PRODUCT_PROMISE_ULTRA100,
414 	  IDE_PCI_CLASS_OVERRIDE,
415 	  "Promise Ultra100/ATA Bus Master IDE Accelerator",
416 	  pdc202xx_chip_map,
417 	},
418 	{ PCI_PRODUCT_PROMISE_ULTRA100X,
419 	  IDE_PCI_CLASS_OVERRIDE,
420 	  "Promise Ultra100/ATA Bus Master IDE Accelerator",
421 	  pdc202xx_chip_map,
422 	},
423 	{ 0,
424 	  0,
425 	  NULL,
426 	  NULL
427 	}
428 };
429 
430 const struct pciide_product_desc pciide_opti_products[] =  {
431 	{ PCI_PRODUCT_OPTI_82C621,
432 	  0,
433 	  "OPTi 82c621 PCI IDE controller",
434 	  opti_chip_map,
435 	},
436 	{ PCI_PRODUCT_OPTI_82C568,
437 	  0,
438 	  "OPTi 82c568 (82c621 compatible) PCI IDE controller",
439 	  opti_chip_map,
440 	},
441 	{ PCI_PRODUCT_OPTI_82D568,
442 	  0,
443 	  "OPTi 82d568 (82c621 compatible) PCI IDE controller",
444 	  opti_chip_map,
445 	},
446 	{ 0,
447 	  0,
448 	  NULL,
449 	  NULL
450 	}
451 };
452 
453 const struct pciide_product_desc pciide_triones_products[] =  {
454 	{ PCI_PRODUCT_TRIONES_HPT366,
455 	  IDE_PCI_CLASS_OVERRIDE,
456 	  NULL,
457 	  hpt_chip_map,
458 	},
459 	{ 0,
460 	  0,
461 	  NULL,
462 	  NULL
463 	}
464 };
465 
466 const struct pciide_product_desc pciide_acard_products[] =  {
467 	{ PCI_PRODUCT_ACARD_ATP850U,
468 	  IDE_PCI_CLASS_OVERRIDE,
469 	  "Acard ATP850U Ultra33 IDE Controller",
470 	  acard_chip_map,
471 	},
472 	{ PCI_PRODUCT_ACARD_ATP860,
473 	  IDE_PCI_CLASS_OVERRIDE,
474 	  "Acard ATP860 Ultra66 IDE Controller",
475 	  acard_chip_map,
476 	},
477 	{ PCI_PRODUCT_ACARD_ATP860A,
478 	  IDE_PCI_CLASS_OVERRIDE,
479 	  "Acard ATP860-A Ultra66 IDE Controller",
480 	  acard_chip_map,
481 	},
482 	{ 0,
483 	  0,
484 	  NULL,
485 	  NULL
486 	}
487 };
488 
489 #ifdef PCIIDE_SERVERWORKS_ENABLE
490 const struct pciide_product_desc pciide_serverworks_products[] =  {
491 	{ PCI_PRODUCT_SERVERWORKS_IDE,
492 	  0,
493 	  "ServerWorks ROSB4 IDE Controller",
494 	  piix_chip_map,
495 	},
496 	{ 0,
497 	  0,
498 	  NULL,
499 	}
500 };
501 #endif
502 
503 #ifdef PCIIDE_WINBOND_ENABLE
504 const struct pciide_product_desc pciide_winbond_products[] =  {
505 	{ PCI_PRODUCT_WINBOND_W83C553F_1,
506 	  0,
507 	  "Winbond W83C553F IDE controller",
508 	  winbond_chip_map,
509 	},
510 	{ 0,
511 	  0,
512 	  NULL,
513 	}
514 };
515 #endif
516 
517 struct pciide_vendor_desc {
518 	u_int32_t ide_vendor;
519 	const struct pciide_product_desc *ide_products;
520 };
521 
522 const struct pciide_vendor_desc pciide_vendors[] = {
523 	{ PCI_VENDOR_INTEL, pciide_intel_products },
524 	{ PCI_VENDOR_CMDTECH, pciide_cmd_products },
525 	{ PCI_VENDOR_VIATECH, pciide_via_products },
526 	{ PCI_VENDOR_CONTAQ, pciide_cypress_products },
527 	{ PCI_VENDOR_SIS, pciide_sis_products },
528 	{ PCI_VENDOR_ALI, pciide_acer_products },
529 	{ PCI_VENDOR_PROMISE, pciide_promise_products },
530 	{ PCI_VENDOR_AMD, pciide_amd_products },
531 	{ PCI_VENDOR_OPTI, pciide_opti_products },
532 	{ PCI_VENDOR_TRIONES, pciide_triones_products },
533 	{ PCI_VENDOR_ACARD, pciide_acard_products },
534 #ifdef PCIIDE_SERVERWORKS_ENABLE
535 	{ PCI_VENDOR_SERVERWORKS, pciide_serverworks_products },
536 #endif
537 #ifdef PCIIDE_WINBOND_ENABLE
538 	{ PCI_VENDOR_WINBOND, pciide_winbond_products },
539 #endif
540 	{ 0, NULL }
541 };
542 
543 /* options passed via the 'flags' config keyword */
544 #define	PCIIDE_OPTIONS_DMA	0x01
545 #define	PCIIDE_OPTIONS_NODMA	0x02
546 
547 int	pciide_match __P((struct device *, struct cfdata *, void *));
548 void	pciide_attach __P((struct device *, struct device *, void *));
549 
550 struct cfattach pciide_ca = {
551 	sizeof(struct pciide_softc), pciide_match, pciide_attach
552 };
553 int	pciide_chipen __P((struct pciide_softc *, struct pci_attach_args *));
554 int	pciide_mapregs_compat __P(( struct pci_attach_args *,
555 	    struct pciide_channel *, int, bus_size_t *, bus_size_t*));
556 int	pciide_mapregs_native __P((struct pci_attach_args *,
557 	    struct pciide_channel *, bus_size_t *, bus_size_t *,
558 	    int (*pci_intr) __P((void *))));
559 void	pciide_mapreg_dma __P((struct pciide_softc *,
560 	    struct pci_attach_args *));
561 int	pciide_chansetup __P((struct pciide_softc *, int, pcireg_t));
562 void	pciide_mapchan __P((struct pci_attach_args *,
563 	    struct pciide_channel *, pcireg_t, bus_size_t *, bus_size_t *,
564 	    int (*pci_intr) __P((void *))));
565 int	pciide_chan_candisable __P((struct pciide_channel *));
566 void	pciide_map_compat_intr __P(( struct pci_attach_args *,
567 	    struct pciide_channel *, int, int));
568 int	pciide_compat_intr __P((void *));
569 int	pciide_pci_intr __P((void *));
570 const struct pciide_product_desc* pciide_lookup_product __P((u_int32_t));
571 
572 const struct pciide_product_desc *
573 pciide_lookup_product(id)
574 	u_int32_t id;
575 {
576 	const struct pciide_product_desc *pp;
577 	const struct pciide_vendor_desc *vp;
578 
579 	for (vp = pciide_vendors; vp->ide_products != NULL; vp++)
580 		if (PCI_VENDOR(id) == vp->ide_vendor)
581 			break;
582 
583 	if ((pp = vp->ide_products) == NULL)
584 		return NULL;
585 
586 	for (; pp->chip_map != NULL; pp++)
587 		if (PCI_PRODUCT(id) == pp->ide_product)
588 			break;
589 
590 	if (pp->chip_map == NULL)
591 		return NULL;
592 	return pp;
593 }
594 
595 int
596 pciide_match(parent, match, aux)
597 	struct device *parent;
598 	struct cfdata *match;
599 	void *aux;
600 {
601 	struct pci_attach_args *pa = aux;
602 	const struct pciide_product_desc *pp;
603 
604 	/*
605 	 * Check the ID register to see that it's a PCI IDE controller.
606 	 * If it is, we assume that we can deal with it; it _should_
607 	 * work in a standardized way...
608 	 */
609 	if (PCI_CLASS(pa->pa_class) == PCI_CLASS_MASS_STORAGE &&
610 	    PCI_SUBCLASS(pa->pa_class) == PCI_SUBCLASS_MASS_STORAGE_IDE) {
611 		return (1);
612 	}
613 
614 	/*
615 	 * Some controllers (e.g. promise Utra-33) don't claim to be PCI IDE
616 	 * controllers. Let see if we can deal with it anyway.
617 	 */
618 	pp = pciide_lookup_product(pa->pa_id);
619 	if (pp  && (pp->ide_flags & IDE_PCI_CLASS_OVERRIDE)) {
620 		return (1);
621 	}
622 
623 	return (0);
624 }
625 
626 void
627 pciide_attach(parent, self, aux)
628 	struct device *parent, *self;
629 	void *aux;
630 {
631 	struct pci_attach_args *pa = aux;
632 	pci_chipset_tag_t pc = pa->pa_pc;
633 	pcitag_t tag = pa->pa_tag;
634 	struct pciide_softc *sc = (struct pciide_softc *)self;
635 	pcireg_t csr;
636 	char devinfo[256];
637 	const char *displaydev;
638 
639 	sc->sc_pp = pciide_lookup_product(pa->pa_id);
640 	if (sc->sc_pp == NULL) {
641 		sc->sc_pp = &default_product_desc;
642 		pci_devinfo(pa->pa_id, pa->pa_class, 0, devinfo);
643 		displaydev = devinfo;
644 	} else
645 		displaydev = sc->sc_pp->ide_name;
646 
647 	/* if displaydev == NULL, printf is done in chip-specific map */
648 	if (displaydev)
649 		printf(": %s (rev. 0x%02x)\n", displaydev,
650 		    PCI_REVISION(pa->pa_class));
651 
652 	sc->sc_pc = pa->pa_pc;
653 	sc->sc_tag = pa->pa_tag;
654 #ifdef WDCDEBUG
655 	if (wdcdebug_pciide_mask & DEBUG_PROBE)
656 		pci_conf_print(sc->sc_pc, sc->sc_tag, NULL);
657 #endif
658 	sc->sc_pp->chip_map(sc, pa);
659 
660 	if (sc->sc_dma_ok) {
661 		csr = pci_conf_read(pc, tag, PCI_COMMAND_STATUS_REG);
662 		csr |= PCI_COMMAND_MASTER_ENABLE;
663 		pci_conf_write(pc, tag, PCI_COMMAND_STATUS_REG, csr);
664 	}
665 	WDCDEBUG_PRINT(("pciide: command/status register=%x\n",
666 	    pci_conf_read(pc, tag, PCI_COMMAND_STATUS_REG)), DEBUG_PROBE);
667 }
668 
669 /* tell wether the chip is enabled or not */
670 int
671 pciide_chipen(sc, pa)
672 	struct pciide_softc *sc;
673 	struct pci_attach_args *pa;
674 {
675 	pcireg_t csr;
676 	if ((pa->pa_flags & PCI_FLAGS_IO_ENABLED) == 0) {
677 		csr = pci_conf_read(sc->sc_pc, sc->sc_tag,
678 		    PCI_COMMAND_STATUS_REG);
679 		printf("%s: device disabled (at %s)\n",
680 	 	   sc->sc_wdcdev.sc_dev.dv_xname,
681 	  	  (csr & PCI_COMMAND_IO_ENABLE) == 0 ?
682 		  "device" : "bridge");
683 		return 0;
684 	}
685 	return 1;
686 }
687 
688 int
689 pciide_mapregs_compat(pa, cp, compatchan, cmdsizep, ctlsizep)
690 	struct pci_attach_args *pa;
691 	struct pciide_channel *cp;
692 	int compatchan;
693 	bus_size_t *cmdsizep, *ctlsizep;
694 {
695 	struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
696 	struct channel_softc *wdc_cp = &cp->wdc_channel;
697 
698 	cp->compat = 1;
699 	*cmdsizep = PCIIDE_COMPAT_CMD_SIZE;
700 	*ctlsizep = PCIIDE_COMPAT_CTL_SIZE;
701 
702 	wdc_cp->cmd_iot = pa->pa_iot;
703 	if (bus_space_map(wdc_cp->cmd_iot, PCIIDE_COMPAT_CMD_BASE(compatchan),
704 	    PCIIDE_COMPAT_CMD_SIZE, 0, &wdc_cp->cmd_ioh) != 0) {
705 		printf("%s: couldn't map %s channel cmd regs\n",
706 		    sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
707 		return (0);
708 	}
709 
710 	wdc_cp->ctl_iot = pa->pa_iot;
711 	if (bus_space_map(wdc_cp->ctl_iot, PCIIDE_COMPAT_CTL_BASE(compatchan),
712 	    PCIIDE_COMPAT_CTL_SIZE, 0, &wdc_cp->ctl_ioh) != 0) {
713 		printf("%s: couldn't map %s channel ctl regs\n",
714 		    sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
715 		bus_space_unmap(wdc_cp->cmd_iot, wdc_cp->cmd_ioh,
716 		    PCIIDE_COMPAT_CMD_SIZE);
717 		return (0);
718 	}
719 
720 	return (1);
721 }
722 
723 int
724 pciide_mapregs_native(pa, cp, cmdsizep, ctlsizep, pci_intr)
725 	struct pci_attach_args * pa;
726 	struct pciide_channel *cp;
727 	bus_size_t *cmdsizep, *ctlsizep;
728 	int (*pci_intr) __P((void *));
729 {
730 	struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
731 	struct channel_softc *wdc_cp = &cp->wdc_channel;
732 	const char *intrstr;
733 	pci_intr_handle_t intrhandle;
734 
735 	cp->compat = 0;
736 
737 	if (sc->sc_pci_ih == NULL) {
738 		if (pci_intr_map(pa, &intrhandle) != 0) {
739 			printf("%s: couldn't map native-PCI interrupt\n",
740 			    sc->sc_wdcdev.sc_dev.dv_xname);
741 			return 0;
742 		}
743 		intrstr = pci_intr_string(pa->pa_pc, intrhandle);
744 		sc->sc_pci_ih = pci_intr_establish(pa->pa_pc,
745 		    intrhandle, IPL_BIO, pci_intr, sc);
746 		if (sc->sc_pci_ih != NULL) {
747 			printf("%s: using %s for native-PCI interrupt\n",
748 			    sc->sc_wdcdev.sc_dev.dv_xname,
749 			    intrstr ? intrstr : "unknown interrupt");
750 		} else {
751 			printf("%s: couldn't establish native-PCI interrupt",
752 			    sc->sc_wdcdev.sc_dev.dv_xname);
753 			if (intrstr != NULL)
754 				printf(" at %s", intrstr);
755 			printf("\n");
756 			return 0;
757 		}
758 	}
759 	cp->ih = sc->sc_pci_ih;
760 	if (pci_mapreg_map(pa, PCIIDE_REG_CMD_BASE(wdc_cp->channel),
761 	    PCI_MAPREG_TYPE_IO, 0,
762 	    &wdc_cp->cmd_iot, &wdc_cp->cmd_ioh, NULL, cmdsizep) != 0) {
763 		printf("%s: couldn't map %s channel cmd regs\n",
764 		    sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
765 		return 0;
766 	}
767 
768 	if (pci_mapreg_map(pa, PCIIDE_REG_CTL_BASE(wdc_cp->channel),
769 	    PCI_MAPREG_TYPE_IO, 0,
770 	    &wdc_cp->ctl_iot, &cp->ctl_baseioh, NULL, ctlsizep) != 0) {
771 		printf("%s: couldn't map %s channel ctl regs\n",
772 		    sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
773 		bus_space_unmap(wdc_cp->cmd_iot, wdc_cp->cmd_ioh, *cmdsizep);
774 		return 0;
775 	}
776 	/*
777 	 * In native mode, 4 bytes of I/O space are mapped for the control
778 	 * register, the control register is at offset 2. Pass the generic
779 	 * code a handle for only one byte at the rigth offset.
780 	 */
781 	if (bus_space_subregion(wdc_cp->ctl_iot, cp->ctl_baseioh, 2, 1,
782 	    &wdc_cp->ctl_ioh) != 0) {
783 		printf("%s: unable to subregion %s channel ctl regs\n",
784 		    sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
785 		bus_space_unmap(wdc_cp->cmd_iot, wdc_cp->cmd_ioh, *cmdsizep);
786 		bus_space_unmap(wdc_cp->cmd_iot, cp->ctl_baseioh, *ctlsizep);
787 		return 0;
788 	}
789 	return (1);
790 }
791 
792 void
793 pciide_mapreg_dma(sc, pa)
794 	struct pciide_softc *sc;
795 	struct pci_attach_args *pa;
796 {
797 	pcireg_t maptype;
798 	bus_addr_t addr;
799 
800 	/*
801 	 * Map DMA registers
802 	 *
803 	 * Note that sc_dma_ok is the right variable to test to see if
804 	 * DMA can be done.  If the interface doesn't support DMA,
805 	 * sc_dma_ok will never be non-zero.  If the DMA regs couldn't
806 	 * be mapped, it'll be zero.  I.e., sc_dma_ok will only be
807 	 * non-zero if the interface supports DMA and the registers
808 	 * could be mapped.
809 	 *
810 	 * XXX Note that despite the fact that the Bus Master IDE specs
811 	 * XXX say that "The bus master IDE function uses 16 bytes of IO
812 	 * XXX space," some controllers (at least the United
813 	 * XXX Microelectronics UM8886BF) place it in memory space.
814 	 */
815 	maptype = pci_mapreg_type(pa->pa_pc, pa->pa_tag,
816 	    PCIIDE_REG_BUS_MASTER_DMA);
817 
818 	switch (maptype) {
819 	case PCI_MAPREG_TYPE_IO:
820 		sc->sc_dma_ok = (pci_mapreg_info(pa->pa_pc, pa->pa_tag,
821 		    PCIIDE_REG_BUS_MASTER_DMA, PCI_MAPREG_TYPE_IO,
822 		    &addr, NULL, NULL) == 0);
823 		if (sc->sc_dma_ok == 0) {
824 			printf(", but unused (couldn't query registers)");
825 			break;
826 		}
827 		if ((sc->sc_pp->ide_flags & IDE_16BIT_IOSPACE)
828 		    && addr >= 0x10000) {
829 			sc->sc_dma_ok = 0;
830 			printf(", but unused (registers at unsafe address "
831 			    "%#lx)", (unsigned long)addr);
832 			break;
833 		}
834 		/* FALLTHROUGH */
835 
836 	case PCI_MAPREG_MEM_TYPE_32BIT:
837 		sc->sc_dma_ok = (pci_mapreg_map(pa,
838 		    PCIIDE_REG_BUS_MASTER_DMA, maptype, 0,
839 		    &sc->sc_dma_iot, &sc->sc_dma_ioh, NULL, NULL) == 0);
840 		sc->sc_dmat = pa->pa_dmat;
841 		if (sc->sc_dma_ok == 0) {
842 			printf(", but unused (couldn't map registers)");
843 		} else {
844 			sc->sc_wdcdev.dma_arg = sc;
845 			sc->sc_wdcdev.dma_init = pciide_dma_init;
846 			sc->sc_wdcdev.dma_start = pciide_dma_start;
847 			sc->sc_wdcdev.dma_finish = pciide_dma_finish;
848 		}
849 
850 		if (sc->sc_wdcdev.sc_dev.dv_cfdata->cf_flags &
851 		    PCIIDE_OPTIONS_NODMA) {
852 			printf(", but unused (forced off by config file)");
853 			sc->sc_dma_ok = 0;
854 		}
855 		break;
856 
857 	default:
858 		sc->sc_dma_ok = 0;
859 		printf(", but unsupported register maptype (0x%x)", maptype);
860 	}
861 }
862 
863 int
864 pciide_compat_intr(arg)
865 	void *arg;
866 {
867 	struct pciide_channel *cp = arg;
868 
869 #ifdef DIAGNOSTIC
870 	/* should only be called for a compat channel */
871 	if (cp->compat == 0)
872 		panic("pciide compat intr called for non-compat chan %p\n", cp);
873 #endif
874 	return (wdcintr(&cp->wdc_channel));
875 }
876 
877 int
878 pciide_pci_intr(arg)
879 	void *arg;
880 {
881 	struct pciide_softc *sc = arg;
882 	struct pciide_channel *cp;
883 	struct channel_softc *wdc_cp;
884 	int i, rv, crv;
885 
886 	rv = 0;
887 	for (i = 0; i < sc->sc_wdcdev.nchannels; i++) {
888 		cp = &sc->pciide_channels[i];
889 		wdc_cp = &cp->wdc_channel;
890 
891 		/* If a compat channel skip. */
892 		if (cp->compat)
893 			continue;
894 		/* if this channel not waiting for intr, skip */
895 		if ((wdc_cp->ch_flags & WDCF_IRQ_WAIT) == 0)
896 			continue;
897 
898 		crv = wdcintr(wdc_cp);
899 		if (crv == 0)
900 			;		/* leave rv alone */
901 		else if (crv == 1)
902 			rv = 1;		/* claim the intr */
903 		else if (rv == 0)	/* crv should be -1 in this case */
904 			rv = crv;	/* if we've done no better, take it */
905 	}
906 	return (rv);
907 }
908 
909 void
910 pciide_channel_dma_setup(cp)
911 	struct pciide_channel *cp;
912 {
913 	int drive;
914 	struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
915 	struct ata_drive_datas *drvp;
916 
917 	for (drive = 0; drive < 2; drive++) {
918 		drvp = &cp->wdc_channel.ch_drive[drive];
919 		/* If no drive, skip */
920 		if ((drvp->drive_flags & DRIVE) == 0)
921 			continue;
922 		/* setup DMA if needed */
923 		if (((drvp->drive_flags & DRIVE_DMA) == 0 &&
924 		    (drvp->drive_flags & DRIVE_UDMA) == 0) ||
925 		    sc->sc_dma_ok == 0) {
926 			drvp->drive_flags &= ~(DRIVE_DMA | DRIVE_UDMA);
927 			continue;
928 		}
929 		if (pciide_dma_table_setup(sc, cp->wdc_channel.channel, drive)
930 		    != 0) {
931 			/* Abort DMA setup */
932 			drvp->drive_flags &= ~(DRIVE_DMA | DRIVE_UDMA);
933 			continue;
934 		}
935 	}
936 }
937 
938 int
939 pciide_dma_table_setup(sc, channel, drive)
940 	struct pciide_softc *sc;
941 	int channel, drive;
942 {
943 	bus_dma_segment_t seg;
944 	int error, rseg;
945 	const bus_size_t dma_table_size =
946 	    sizeof(struct idedma_table) * NIDEDMA_TABLES;
947 	struct pciide_dma_maps *dma_maps =
948 	    &sc->pciide_channels[channel].dma_maps[drive];
949 
950 	/* If table was already allocated, just return */
951 	if (dma_maps->dma_table)
952 		return 0;
953 
954 	/* Allocate memory for the DMA tables and map it */
955 	if ((error = bus_dmamem_alloc(sc->sc_dmat, dma_table_size,
956 	    IDEDMA_TBL_ALIGN, IDEDMA_TBL_ALIGN, &seg, 1, &rseg,
957 	    BUS_DMA_NOWAIT)) != 0) {
958 		printf("%s:%d: unable to allocate table DMA for "
959 		    "drive %d, error=%d\n", sc->sc_wdcdev.sc_dev.dv_xname,
960 		    channel, drive, error);
961 		return error;
962 	}
963 	if ((error = bus_dmamem_map(sc->sc_dmat, &seg, rseg,
964 	    dma_table_size,
965 	    (caddr_t *)&dma_maps->dma_table,
966 	    BUS_DMA_NOWAIT|BUS_DMA_COHERENT)) != 0) {
967 		printf("%s:%d: unable to map table DMA for"
968 		    "drive %d, error=%d\n", sc->sc_wdcdev.sc_dev.dv_xname,
969 		    channel, drive, error);
970 		return error;
971 	}
972 	WDCDEBUG_PRINT(("pciide_dma_table_setup: table at %p len %lu, "
973 	    "phy 0x%lx\n", dma_maps->dma_table, (u_long)dma_table_size,
974 	    (unsigned long)seg.ds_addr), DEBUG_PROBE);
975 
976 	/* Create and load table DMA map for this disk */
977 	if ((error = bus_dmamap_create(sc->sc_dmat, dma_table_size,
978 	    1, dma_table_size, IDEDMA_TBL_ALIGN, BUS_DMA_NOWAIT,
979 	    &dma_maps->dmamap_table)) != 0) {
980 		printf("%s:%d: unable to create table DMA map for "
981 		    "drive %d, error=%d\n", sc->sc_wdcdev.sc_dev.dv_xname,
982 		    channel, drive, error);
983 		return error;
984 	}
985 	if ((error = bus_dmamap_load(sc->sc_dmat,
986 	    dma_maps->dmamap_table,
987 	    dma_maps->dma_table,
988 	    dma_table_size, NULL, BUS_DMA_NOWAIT)) != 0) {
989 		printf("%s:%d: unable to load table DMA map for "
990 		    "drive %d, error=%d\n", sc->sc_wdcdev.sc_dev.dv_xname,
991 		    channel, drive, error);
992 		return error;
993 	}
994 	WDCDEBUG_PRINT(("pciide_dma_table_setup: phy addr of table 0x%lx\n",
995 	    (unsigned long)dma_maps->dmamap_table->dm_segs[0].ds_addr),
996 	    DEBUG_PROBE);
997 	/* Create a xfer DMA map for this drive */
998 	if ((error = bus_dmamap_create(sc->sc_dmat, IDEDMA_BYTE_COUNT_MAX,
999 	    NIDEDMA_TABLES, IDEDMA_BYTE_COUNT_MAX, IDEDMA_BYTE_COUNT_ALIGN,
1000 	    BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW,
1001 	    &dma_maps->dmamap_xfer)) != 0) {
1002 		printf("%s:%d: unable to create xfer DMA map for "
1003 		    "drive %d, error=%d\n", sc->sc_wdcdev.sc_dev.dv_xname,
1004 		    channel, drive, error);
1005 		return error;
1006 	}
1007 	return 0;
1008 }
1009 
1010 int
1011 pciide_dma_init(v, channel, drive, databuf, datalen, flags)
1012 	void *v;
1013 	int channel, drive;
1014 	void *databuf;
1015 	size_t datalen;
1016 	int flags;
1017 {
1018 	struct pciide_softc *sc = v;
1019 	int error, seg;
1020 	struct pciide_dma_maps *dma_maps =
1021 	    &sc->pciide_channels[channel].dma_maps[drive];
1022 
1023 	error = bus_dmamap_load(sc->sc_dmat,
1024 	    dma_maps->dmamap_xfer,
1025 	    databuf, datalen, NULL, BUS_DMA_NOWAIT | BUS_DMA_STREAMING |
1026 	    ((flags & WDC_DMA_READ) ? BUS_DMA_READ : BUS_DMA_WRITE));
1027 	if (error) {
1028 		printf("%s:%d: unable to load xfer DMA map for"
1029 		    "drive %d, error=%d\n", sc->sc_wdcdev.sc_dev.dv_xname,
1030 		    channel, drive, error);
1031 		return error;
1032 	}
1033 
1034 	bus_dmamap_sync(sc->sc_dmat, dma_maps->dmamap_xfer, 0,
1035 	    dma_maps->dmamap_xfer->dm_mapsize,
1036 	    (flags & WDC_DMA_READ) ?
1037 	    BUS_DMASYNC_PREREAD : BUS_DMASYNC_PREWRITE);
1038 
1039 	for (seg = 0; seg < dma_maps->dmamap_xfer->dm_nsegs; seg++) {
1040 #ifdef DIAGNOSTIC
1041 		/* A segment must not cross a 64k boundary */
1042 		{
1043 		u_long phys = dma_maps->dmamap_xfer->dm_segs[seg].ds_addr;
1044 		u_long len = dma_maps->dmamap_xfer->dm_segs[seg].ds_len;
1045 		if ((phys & ~IDEDMA_BYTE_COUNT_MASK) !=
1046 		    ((phys + len - 1) & ~IDEDMA_BYTE_COUNT_MASK)) {
1047 			printf("pciide_dma: segment %d physical addr 0x%lx"
1048 			    " len 0x%lx not properly aligned\n",
1049 			    seg, phys, len);
1050 			panic("pciide_dma: buf align");
1051 		}
1052 		}
1053 #endif
1054 		dma_maps->dma_table[seg].base_addr =
1055 		    htole32(dma_maps->dmamap_xfer->dm_segs[seg].ds_addr);
1056 		dma_maps->dma_table[seg].byte_count =
1057 		    htole32(dma_maps->dmamap_xfer->dm_segs[seg].ds_len &
1058 		    IDEDMA_BYTE_COUNT_MASK);
1059 		WDCDEBUG_PRINT(("\t seg %d len %d addr 0x%x\n",
1060 		   seg, le32toh(dma_maps->dma_table[seg].byte_count),
1061 		   le32toh(dma_maps->dma_table[seg].base_addr)), DEBUG_DMA);
1062 
1063 	}
1064 	dma_maps->dma_table[dma_maps->dmamap_xfer->dm_nsegs -1].byte_count |=
1065 	    htole32(IDEDMA_BYTE_COUNT_EOT);
1066 
1067 	bus_dmamap_sync(sc->sc_dmat, dma_maps->dmamap_table, 0,
1068 	    dma_maps->dmamap_table->dm_mapsize,
1069 	    BUS_DMASYNC_PREWRITE);
1070 
1071 	/* Maps are ready. Start DMA function */
1072 #ifdef DIAGNOSTIC
1073 	if (dma_maps->dmamap_table->dm_segs[0].ds_addr & ~IDEDMA_TBL_MASK) {
1074 		printf("pciide_dma_init: addr 0x%lx not properly aligned\n",
1075 		    (u_long)dma_maps->dmamap_table->dm_segs[0].ds_addr);
1076 		panic("pciide_dma_init: table align");
1077 	}
1078 #endif
1079 
1080 	/* Clear status bits */
1081 	bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
1082 	    IDEDMA_CTL + IDEDMA_SCH_OFFSET * channel,
1083 	    bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh,
1084 		IDEDMA_CTL + IDEDMA_SCH_OFFSET * channel));
1085 	/* Write table addr */
1086 	bus_space_write_4(sc->sc_dma_iot, sc->sc_dma_ioh,
1087 	    IDEDMA_TBL + IDEDMA_SCH_OFFSET * channel,
1088 	    dma_maps->dmamap_table->dm_segs[0].ds_addr);
1089 	/* set read/write */
1090 	bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
1091 	    IDEDMA_CMD + IDEDMA_SCH_OFFSET * channel,
1092 	    (flags & WDC_DMA_READ) ? IDEDMA_CMD_WRITE: 0);
1093 	/* remember flags */
1094 	dma_maps->dma_flags = flags;
1095 	return 0;
1096 }
1097 
1098 void
1099 pciide_dma_start(v, channel, drive)
1100 	void *v;
1101 	int channel, drive;
1102 {
1103 	struct pciide_softc *sc = v;
1104 
1105 	WDCDEBUG_PRINT(("pciide_dma_start\n"),DEBUG_XFERS);
1106 	bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
1107 	    IDEDMA_CMD + IDEDMA_SCH_OFFSET * channel,
1108 	    bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh,
1109 		IDEDMA_CMD + IDEDMA_SCH_OFFSET * channel) | IDEDMA_CMD_START);
1110 }
1111 
1112 int
1113 pciide_dma_finish(v, channel, drive, force)
1114 	void *v;
1115 	int channel, drive;
1116 	int force;
1117 {
1118 	struct pciide_softc *sc = v;
1119 	u_int8_t status;
1120 	int error = 0;
1121 	struct pciide_dma_maps *dma_maps =
1122 	    &sc->pciide_channels[channel].dma_maps[drive];
1123 
1124 	status = bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh,
1125 	    IDEDMA_CTL + IDEDMA_SCH_OFFSET * channel);
1126 	WDCDEBUG_PRINT(("pciide_dma_finish: status 0x%x\n", status),
1127 	    DEBUG_XFERS);
1128 
1129 	if (force == 0 && (status & IDEDMA_CTL_INTR) == 0)
1130 		return WDC_DMAST_NOIRQ;
1131 
1132 	/* stop DMA channel */
1133 	bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
1134 	    IDEDMA_CMD + IDEDMA_SCH_OFFSET * channel,
1135 	    bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh,
1136 		IDEDMA_CMD + IDEDMA_SCH_OFFSET * channel) & ~IDEDMA_CMD_START);
1137 
1138 	/* Unload the map of the data buffer */
1139 	bus_dmamap_sync(sc->sc_dmat, dma_maps->dmamap_xfer, 0,
1140 	    dma_maps->dmamap_xfer->dm_mapsize,
1141 	    (dma_maps->dma_flags & WDC_DMA_READ) ?
1142 	    BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE);
1143 	bus_dmamap_unload(sc->sc_dmat, dma_maps->dmamap_xfer);
1144 
1145 	if ((status & IDEDMA_CTL_ERR) != 0) {
1146 		printf("%s:%d:%d: bus-master DMA error: status=0x%x\n",
1147 		    sc->sc_wdcdev.sc_dev.dv_xname, channel, drive, status);
1148 		error |= WDC_DMAST_ERR;
1149 	}
1150 
1151 	if ((status & IDEDMA_CTL_INTR) == 0) {
1152 		printf("%s:%d:%d: bus-master DMA error: missing interrupt, "
1153 		    "status=0x%x\n", sc->sc_wdcdev.sc_dev.dv_xname, channel,
1154 		    drive, status);
1155 		error |= WDC_DMAST_NOIRQ;
1156 	}
1157 
1158 	if ((status & IDEDMA_CTL_ACT) != 0) {
1159 		/* data underrun, may be a valid condition for ATAPI */
1160 		error |= WDC_DMAST_UNDER;
1161 	}
1162 	return error;
1163 }
1164 
1165 void
1166 pciide_irqack(chp)
1167 	struct channel_softc *chp;
1168 {
1169 	struct pciide_channel *cp = (struct pciide_channel*)chp;
1170 	struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
1171 
1172 	/* clear status bits in IDE DMA registers */
1173 	bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
1174 	    IDEDMA_CTL + IDEDMA_SCH_OFFSET * chp->channel,
1175 	    bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh,
1176 		IDEDMA_CTL + IDEDMA_SCH_OFFSET * chp->channel));
1177 }
1178 
1179 /* some common code used by several chip_map */
1180 int
1181 pciide_chansetup(sc, channel, interface)
1182 	struct pciide_softc *sc;
1183 	int channel;
1184 	pcireg_t interface;
1185 {
1186 	struct pciide_channel *cp = &sc->pciide_channels[channel];
1187 	sc->wdc_chanarray[channel] = &cp->wdc_channel;
1188 	cp->name = PCIIDE_CHANNEL_NAME(channel);
1189 	cp->wdc_channel.channel = channel;
1190 	cp->wdc_channel.wdc = &sc->sc_wdcdev;
1191 	cp->wdc_channel.ch_queue =
1192 	    malloc(sizeof(struct channel_queue), M_DEVBUF, M_NOWAIT);
1193 	if (cp->wdc_channel.ch_queue == NULL) {
1194 		printf("%s %s channel: "
1195 		    "can't allocate memory for command queue",
1196 		sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
1197 		return 0;
1198 	}
1199 	printf("%s: %s channel %s to %s mode\n",
1200 	    sc->sc_wdcdev.sc_dev.dv_xname, cp->name,
1201 	    (interface & PCIIDE_INTERFACE_SETTABLE(channel)) ?
1202 	    "configured" : "wired",
1203 	    (interface & PCIIDE_INTERFACE_PCI(channel)) ?
1204 	    "native-PCI" : "compatibility");
1205 	return 1;
1206 }
1207 
1208 /* some common code used by several chip channel_map */
1209 void
1210 pciide_mapchan(pa, cp, interface, cmdsizep, ctlsizep, pci_intr)
1211 	struct pci_attach_args *pa;
1212 	struct pciide_channel *cp;
1213 	pcireg_t interface;
1214 	bus_size_t *cmdsizep, *ctlsizep;
1215 	int (*pci_intr) __P((void *));
1216 {
1217 	struct channel_softc *wdc_cp = &cp->wdc_channel;
1218 
1219 	if (interface & PCIIDE_INTERFACE_PCI(wdc_cp->channel))
1220 		cp->hw_ok = pciide_mapregs_native(pa, cp, cmdsizep, ctlsizep,
1221 		    pci_intr);
1222 	else
1223 		cp->hw_ok = pciide_mapregs_compat(pa, cp,
1224 		    wdc_cp->channel, cmdsizep, ctlsizep);
1225 
1226 	if (cp->hw_ok == 0)
1227 		return;
1228 	wdc_cp->data32iot = wdc_cp->cmd_iot;
1229 	wdc_cp->data32ioh = wdc_cp->cmd_ioh;
1230 	wdcattach(wdc_cp);
1231 }
1232 
1233 /*
1234  * Generic code to call to know if a channel can be disabled. Return 1
1235  * if channel can be disabled, 0 if not
1236  */
1237 int
1238 pciide_chan_candisable(cp)
1239 	struct pciide_channel *cp;
1240 {
1241 	struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
1242 	struct channel_softc *wdc_cp = &cp->wdc_channel;
1243 
1244 	if ((wdc_cp->ch_drive[0].drive_flags & DRIVE) == 0 &&
1245 	    (wdc_cp->ch_drive[1].drive_flags & DRIVE) == 0) {
1246 		printf("%s: disabling %s channel (no drives)\n",
1247 		    sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
1248 		cp->hw_ok = 0;
1249 		return 1;
1250 	}
1251 	return 0;
1252 }
1253 
1254 /*
1255  * generic code to map the compat intr if hw_ok=1 and it is a compat channel.
1256  * Set hw_ok=0 on failure
1257  */
1258 void
1259 pciide_map_compat_intr(pa, cp, compatchan, interface)
1260 	struct pci_attach_args *pa;
1261 	struct pciide_channel *cp;
1262 	int compatchan, interface;
1263 {
1264 	struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
1265 	struct channel_softc *wdc_cp = &cp->wdc_channel;
1266 
1267 	if (cp->hw_ok == 0)
1268 		return;
1269 	if ((interface & PCIIDE_INTERFACE_PCI(wdc_cp->channel)) != 0)
1270 		return;
1271 
1272 #ifdef __HAVE_PCIIDE_MACHDEP_COMPAT_INTR_ESTABLISH
1273 	cp->ih = pciide_machdep_compat_intr_establish(&sc->sc_wdcdev.sc_dev,
1274 	    pa, compatchan, pciide_compat_intr, cp);
1275 	if (cp->ih == NULL) {
1276 #endif
1277 		printf("%s: no compatibility interrupt for use by %s "
1278 		    "channel\n", sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
1279 		cp->hw_ok = 0;
1280 #ifdef __HAVE_PCIIDE_MACHDEP_COMPAT_INTR_ESTABLISH
1281 	}
1282 #endif
1283 }
1284 
1285 void
1286 pciide_print_modes(cp)
1287 	struct pciide_channel *cp;
1288 {
1289 	wdc_print_modes(&cp->wdc_channel);
1290 }
1291 
1292 void
1293 default_chip_map(sc, pa)
1294 	struct pciide_softc *sc;
1295 	struct pci_attach_args *pa;
1296 {
1297 	struct pciide_channel *cp;
1298 	pcireg_t interface = PCI_INTERFACE(pa->pa_class);
1299 	pcireg_t csr;
1300 	int channel, drive;
1301 	struct ata_drive_datas *drvp;
1302 	u_int8_t idedma_ctl;
1303 	bus_size_t cmdsize, ctlsize;
1304 	char *failreason;
1305 
1306 	if (pciide_chipen(sc, pa) == 0)
1307 		return;
1308 
1309 	if (interface & PCIIDE_INTERFACE_BUS_MASTER_DMA) {
1310 		printf("%s: bus-master DMA support present",
1311 		    sc->sc_wdcdev.sc_dev.dv_xname);
1312 		if (sc->sc_pp == &default_product_desc &&
1313 		    (sc->sc_wdcdev.sc_dev.dv_cfdata->cf_flags &
1314 		    PCIIDE_OPTIONS_DMA) == 0) {
1315 			printf(", but unused (no driver support)");
1316 			sc->sc_dma_ok = 0;
1317 		} else {
1318 			pciide_mapreg_dma(sc, pa);
1319 			if (sc->sc_dma_ok != 0)
1320 				printf(", used without full driver "
1321 				    "support");
1322 		}
1323 	} else {
1324 		printf("%s: hardware does not support DMA",
1325 		    sc->sc_wdcdev.sc_dev.dv_xname);
1326 		sc->sc_dma_ok = 0;
1327 	}
1328 	printf("\n");
1329 	if (sc->sc_dma_ok) {
1330 		sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_IRQACK;
1331 		sc->sc_wdcdev.irqack = pciide_irqack;
1332 	}
1333 	sc->sc_wdcdev.PIO_cap = 0;
1334 	sc->sc_wdcdev.DMA_cap = 0;
1335 
1336 	sc->sc_wdcdev.channels = sc->wdc_chanarray;
1337 	sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
1338 	sc->sc_wdcdev.cap |= WDC_CAPABILITY_DATA16;
1339 
1340 	for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
1341 		cp = &sc->pciide_channels[channel];
1342 		if (pciide_chansetup(sc, channel, interface) == 0)
1343 			continue;
1344 		if (interface & PCIIDE_INTERFACE_PCI(channel)) {
1345 			cp->hw_ok = pciide_mapregs_native(pa, cp, &cmdsize,
1346 			    &ctlsize, pciide_pci_intr);
1347 		} else {
1348 			cp->hw_ok = pciide_mapregs_compat(pa, cp,
1349 			    channel, &cmdsize, &ctlsize);
1350 		}
1351 		if (cp->hw_ok == 0)
1352 			continue;
1353 		/*
1354 		 * Check to see if something appears to be there.
1355 		 */
1356 		failreason = NULL;
1357 		if (!wdcprobe(&cp->wdc_channel)) {
1358 			failreason = "not responding; disabled or no drives?";
1359 			goto next;
1360 		}
1361 		/*
1362 		 * Now, make sure it's actually attributable to this PCI IDE
1363 		 * channel by trying to access the channel again while the
1364 		 * PCI IDE controller's I/O space is disabled.  (If the
1365 		 * channel no longer appears to be there, it belongs to
1366 		 * this controller.)  YUCK!
1367 		 */
1368 		csr = pci_conf_read(sc->sc_pc, sc->sc_tag,
1369 		    PCI_COMMAND_STATUS_REG);
1370 		pci_conf_write(sc->sc_pc, sc->sc_tag, PCI_COMMAND_STATUS_REG,
1371 		    csr & ~PCI_COMMAND_IO_ENABLE);
1372 		if (wdcprobe(&cp->wdc_channel))
1373 			failreason = "other hardware responding at addresses";
1374 		pci_conf_write(sc->sc_pc, sc->sc_tag,
1375 		    PCI_COMMAND_STATUS_REG, csr);
1376 next:
1377 		if (failreason) {
1378 			printf("%s: %s channel ignored (%s)\n",
1379 			    sc->sc_wdcdev.sc_dev.dv_xname, cp->name,
1380 			    failreason);
1381 			cp->hw_ok = 0;
1382 			bus_space_unmap(cp->wdc_channel.cmd_iot,
1383 			    cp->wdc_channel.cmd_ioh, cmdsize);
1384 			bus_space_unmap(cp->wdc_channel.ctl_iot,
1385 			    cp->wdc_channel.ctl_ioh, ctlsize);
1386 		} else {
1387 			pciide_map_compat_intr(pa, cp, channel, interface);
1388 		}
1389 		if (cp->hw_ok) {
1390 			cp->wdc_channel.data32iot = cp->wdc_channel.cmd_iot;
1391 			cp->wdc_channel.data32ioh = cp->wdc_channel.cmd_ioh;
1392 			wdcattach(&cp->wdc_channel);
1393 		}
1394 	}
1395 
1396 	if (sc->sc_dma_ok == 0)
1397 		return;
1398 
1399 	/* Allocate DMA maps */
1400 	for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
1401 		idedma_ctl = 0;
1402 		cp = &sc->pciide_channels[channel];
1403 		for (drive = 0; drive < 2; drive++) {
1404 			drvp = &cp->wdc_channel.ch_drive[drive];
1405 			/* If no drive, skip */
1406 			if ((drvp->drive_flags & DRIVE) == 0)
1407 				continue;
1408 			if ((drvp->drive_flags & DRIVE_DMA) == 0)
1409 				continue;
1410 			if (pciide_dma_table_setup(sc, channel, drive) != 0) {
1411 				/* Abort DMA setup */
1412 				printf("%s:%d:%d: can't allocate DMA maps, "
1413 				    "using PIO transfers\n",
1414 				    sc->sc_wdcdev.sc_dev.dv_xname,
1415 				    channel, drive);
1416 				drvp->drive_flags &= ~DRIVE_DMA;
1417 			}
1418 			printf("%s:%d:%d: using DMA data transfers\n",
1419 			    sc->sc_wdcdev.sc_dev.dv_xname,
1420 			    channel, drive);
1421 			idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
1422 		}
1423 		if (idedma_ctl != 0) {
1424 			/* Add software bits in status register */
1425 			bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
1426 			    IDEDMA_CTL + (IDEDMA_SCH_OFFSET * channel),
1427 			    idedma_ctl);
1428 		}
1429 	}
1430 }
1431 
1432 void
1433 piix_chip_map(sc, pa)
1434 	struct pciide_softc *sc;
1435 	struct pci_attach_args *pa;
1436 {
1437 	struct pciide_channel *cp;
1438 	int channel;
1439 	u_int32_t idetim;
1440 	bus_size_t cmdsize, ctlsize;
1441 
1442 	if (pciide_chipen(sc, pa) == 0)
1443 		return;
1444 
1445 	printf("%s: bus-master DMA support present",
1446 	    sc->sc_wdcdev.sc_dev.dv_xname);
1447 	pciide_mapreg_dma(sc, pa);
1448 	printf("\n");
1449 	sc->sc_wdcdev.cap |= WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
1450 	    WDC_CAPABILITY_MODE;
1451 	if (sc->sc_dma_ok) {
1452 		sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_IRQACK;
1453 		sc->sc_wdcdev.irqack = pciide_irqack;
1454 		switch(sc->sc_pp->ide_product) {
1455 		case PCI_PRODUCT_INTEL_82371AB_IDE:
1456 		case PCI_PRODUCT_INTEL_82440MX_IDE:
1457 		case PCI_PRODUCT_INTEL_82801AA_IDE:
1458 		case PCI_PRODUCT_INTEL_82801AB_IDE:
1459 		case PCI_PRODUCT_INTEL_82801BA_IDE:
1460 		case PCI_PRODUCT_INTEL_82801BAM_IDE:
1461 			sc->sc_wdcdev.cap |= WDC_CAPABILITY_UDMA;
1462 		}
1463 	}
1464 	sc->sc_wdcdev.PIO_cap = 4;
1465 	sc->sc_wdcdev.DMA_cap = 2;
1466 	switch(sc->sc_pp->ide_product) {
1467 	case PCI_PRODUCT_INTEL_82801AA_IDE:
1468 		sc->sc_wdcdev.UDMA_cap = 4;
1469 		break;
1470 	case PCI_PRODUCT_INTEL_82801BA_IDE:
1471 	case PCI_PRODUCT_INTEL_82801BAM_IDE:
1472 		sc->sc_wdcdev.UDMA_cap = 5;
1473 		break;
1474 	default:
1475 		sc->sc_wdcdev.UDMA_cap = 2;
1476 	}
1477 	if (sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82371FB_IDE)
1478 		sc->sc_wdcdev.set_modes = piix_setup_channel;
1479 	else
1480 		sc->sc_wdcdev.set_modes = piix3_4_setup_channel;
1481 	sc->sc_wdcdev.channels = sc->wdc_chanarray;
1482 	sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
1483 
1484 	WDCDEBUG_PRINT(("piix_setup_chip: old idetim=0x%x",
1485 	    pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_IDETIM)),
1486 	    DEBUG_PROBE);
1487 	if (sc->sc_pp->ide_product != PCI_PRODUCT_INTEL_82371FB_IDE) {
1488 		WDCDEBUG_PRINT((", sidetim=0x%x",
1489 		    pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_SIDETIM)),
1490 		    DEBUG_PROBE);
1491 		if (sc->sc_wdcdev.cap & WDC_CAPABILITY_UDMA) {
1492 			WDCDEBUG_PRINT((", udamreg 0x%x",
1493 			    pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_UDMAREG)),
1494 			    DEBUG_PROBE);
1495 		}
1496 		if (sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801AA_IDE ||
1497 		    sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801AB_IDE ||
1498 		    sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801BA_IDE ||
1499 		    sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801BAM_IDE) {
1500 			WDCDEBUG_PRINT((", IDE_CONTROL 0x%x",
1501 			    pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_CONFIG)),
1502 			    DEBUG_PROBE);
1503 		}
1504 
1505 	}
1506 	WDCDEBUG_PRINT(("\n"), DEBUG_PROBE);
1507 
1508 	for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
1509 		cp = &sc->pciide_channels[channel];
1510 		/* PIIX is compat-only */
1511 		if (pciide_chansetup(sc, channel, 0) == 0)
1512 			continue;
1513 		idetim = pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_IDETIM);
1514 		if ((PIIX_IDETIM_READ(idetim, channel) &
1515 		    PIIX_IDETIM_IDE) == 0) {
1516 			printf("%s: %s channel ignored (disabled)\n",
1517 			    sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
1518 			continue;
1519 		}
1520 		/* PIIX are compat-only pciide devices */
1521 		pciide_mapchan(pa, cp, 0, &cmdsize, &ctlsize, pciide_pci_intr);
1522 		if (cp->hw_ok == 0)
1523 			continue;
1524 		if (pciide_chan_candisable(cp)) {
1525 			idetim = PIIX_IDETIM_CLEAR(idetim, PIIX_IDETIM_IDE,
1526 			    channel);
1527 			pci_conf_write(sc->sc_pc, sc->sc_tag, PIIX_IDETIM,
1528 			    idetim);
1529 		}
1530 		pciide_map_compat_intr(pa, cp, channel, 0);
1531 		if (cp->hw_ok == 0)
1532 			continue;
1533 		sc->sc_wdcdev.set_modes(&cp->wdc_channel);
1534 	}
1535 
1536 	WDCDEBUG_PRINT(("piix_setup_chip: idetim=0x%x",
1537 	    pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_IDETIM)),
1538 	    DEBUG_PROBE);
1539 	if (sc->sc_pp->ide_product != PCI_PRODUCT_INTEL_82371FB_IDE) {
1540 		WDCDEBUG_PRINT((", sidetim=0x%x",
1541 		    pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_SIDETIM)),
1542 		    DEBUG_PROBE);
1543 		if (sc->sc_wdcdev.cap & WDC_CAPABILITY_UDMA) {
1544 			WDCDEBUG_PRINT((", udamreg 0x%x",
1545 			    pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_UDMAREG)),
1546 			    DEBUG_PROBE);
1547 		}
1548 		if (sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801AA_IDE ||
1549 		    sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801AB_IDE ||
1550 		    sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801BA_IDE ||
1551 		    sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801BAM_IDE) {
1552 			WDCDEBUG_PRINT((", IDE_CONTROL 0x%x",
1553 			    pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_CONFIG)),
1554 			    DEBUG_PROBE);
1555 		}
1556 	}
1557 	WDCDEBUG_PRINT(("\n"), DEBUG_PROBE);
1558 }
1559 
1560 void
1561 piix_setup_channel(chp)
1562 	struct channel_softc *chp;
1563 {
1564 	u_int8_t mode[2], drive;
1565 	u_int32_t oidetim, idetim, idedma_ctl;
1566 	struct pciide_channel *cp = (struct pciide_channel*)chp;
1567 	struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
1568 	struct ata_drive_datas *drvp = cp->wdc_channel.ch_drive;
1569 
1570 	oidetim = pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_IDETIM);
1571 	idetim = PIIX_IDETIM_CLEAR(oidetim, 0xffff, chp->channel);
1572 	idedma_ctl = 0;
1573 
1574 	/* set up new idetim: Enable IDE registers decode */
1575 	idetim = PIIX_IDETIM_SET(idetim, PIIX_IDETIM_IDE,
1576 	    chp->channel);
1577 
1578 	/* setup DMA */
1579 	pciide_channel_dma_setup(cp);
1580 
1581 	/*
1582 	 * Here we have to mess up with drives mode: PIIX can't have
1583 	 * different timings for master and slave drives.
1584 	 * We need to find the best combination.
1585 	 */
1586 
1587 	/* If both drives supports DMA, take the lower mode */
1588 	if ((drvp[0].drive_flags & DRIVE_DMA) &&
1589 	    (drvp[1].drive_flags & DRIVE_DMA)) {
1590 		mode[0] = mode[1] =
1591 		    min(drvp[0].DMA_mode, drvp[1].DMA_mode);
1592 		    drvp[0].DMA_mode = mode[0];
1593 		    drvp[1].DMA_mode = mode[1];
1594 		goto ok;
1595 	}
1596 	/*
1597 	 * If only one drive supports DMA, use its mode, and
1598 	 * put the other one in PIO mode 0 if mode not compatible
1599 	 */
1600 	if (drvp[0].drive_flags & DRIVE_DMA) {
1601 		mode[0] = drvp[0].DMA_mode;
1602 		mode[1] = drvp[1].PIO_mode;
1603 		if (piix_isp_pio[mode[1]] != piix_isp_dma[mode[0]] ||
1604 		    piix_rtc_pio[mode[1]] != piix_rtc_dma[mode[0]])
1605 			mode[1] = drvp[1].PIO_mode = 0;
1606 		goto ok;
1607 	}
1608 	if (drvp[1].drive_flags & DRIVE_DMA) {
1609 		mode[1] = drvp[1].DMA_mode;
1610 		mode[0] = drvp[0].PIO_mode;
1611 		if (piix_isp_pio[mode[0]] != piix_isp_dma[mode[1]] ||
1612 		    piix_rtc_pio[mode[0]] != piix_rtc_dma[mode[1]])
1613 			mode[0] = drvp[0].PIO_mode = 0;
1614 		goto ok;
1615 	}
1616 	/*
1617 	 * If both drives are not DMA, takes the lower mode, unless
1618 	 * one of them is PIO mode < 2
1619 	 */
1620 	if (drvp[0].PIO_mode < 2) {
1621 		mode[0] = drvp[0].PIO_mode = 0;
1622 		mode[1] = drvp[1].PIO_mode;
1623 	} else if (drvp[1].PIO_mode < 2) {
1624 		mode[1] = drvp[1].PIO_mode = 0;
1625 		mode[0] = drvp[0].PIO_mode;
1626 	} else {
1627 		mode[0] = mode[1] =
1628 		    min(drvp[1].PIO_mode, drvp[0].PIO_mode);
1629 		drvp[0].PIO_mode = mode[0];
1630 		drvp[1].PIO_mode = mode[1];
1631 	}
1632 ok:	/* The modes are setup */
1633 	for (drive = 0; drive < 2; drive++) {
1634 		if (drvp[drive].drive_flags & DRIVE_DMA) {
1635 			idetim |= piix_setup_idetim_timings(
1636 			    mode[drive], 1, chp->channel);
1637 			goto end;
1638 		}
1639 	}
1640 	/* If we are there, none of the drives are DMA */
1641 	if (mode[0] >= 2)
1642 		idetim |= piix_setup_idetim_timings(
1643 		    mode[0], 0, chp->channel);
1644 	else
1645 		idetim |= piix_setup_idetim_timings(
1646 		    mode[1], 0, chp->channel);
1647 end:	/*
1648 	 * timing mode is now set up in the controller. Enable
1649 	 * it per-drive
1650 	 */
1651 	for (drive = 0; drive < 2; drive++) {
1652 		/* If no drive, skip */
1653 		if ((drvp[drive].drive_flags & DRIVE) == 0)
1654 			continue;
1655 		idetim |= piix_setup_idetim_drvs(&drvp[drive]);
1656 		if (drvp[drive].drive_flags & DRIVE_DMA)
1657 			idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
1658 	}
1659 	if (idedma_ctl != 0) {
1660 		/* Add software bits in status register */
1661 		bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
1662 		    IDEDMA_CTL + (IDEDMA_SCH_OFFSET * chp->channel),
1663 		    idedma_ctl);
1664 	}
1665 	pci_conf_write(sc->sc_pc, sc->sc_tag, PIIX_IDETIM, idetim);
1666 	pciide_print_modes(cp);
1667 }
1668 
1669 void
1670 piix3_4_setup_channel(chp)
1671 	struct channel_softc *chp;
1672 {
1673 	struct ata_drive_datas *drvp;
1674 	u_int32_t oidetim, idetim, sidetim, udmareg, ideconf, idedma_ctl;
1675 	struct pciide_channel *cp = (struct pciide_channel*)chp;
1676 	struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
1677 	int drive;
1678 	int channel = chp->channel;
1679 
1680 	oidetim = pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_IDETIM);
1681 	sidetim = pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_SIDETIM);
1682 	udmareg = pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_UDMAREG);
1683 	ideconf = pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_CONFIG);
1684 	idetim = PIIX_IDETIM_CLEAR(oidetim, 0xffff, channel);
1685 	sidetim &= ~(PIIX_SIDETIM_ISP_MASK(channel) |
1686 	    PIIX_SIDETIM_RTC_MASK(channel));
1687 
1688 	idedma_ctl = 0;
1689 	/* If channel disabled, no need to go further */
1690 	if ((PIIX_IDETIM_READ(oidetim, channel) & PIIX_IDETIM_IDE) == 0)
1691 		return;
1692 	/* set up new idetim: Enable IDE registers decode */
1693 	idetim = PIIX_IDETIM_SET(idetim, PIIX_IDETIM_IDE, channel);
1694 
1695 	/* setup DMA if needed */
1696 	pciide_channel_dma_setup(cp);
1697 
1698 	for (drive = 0; drive < 2; drive++) {
1699 		udmareg &= ~(PIIX_UDMACTL_DRV_EN(channel, drive) |
1700 		    PIIX_UDMATIM_SET(0x3, channel, drive));
1701 		drvp = &chp->ch_drive[drive];
1702 		/* If no drive, skip */
1703 		if ((drvp->drive_flags & DRIVE) == 0)
1704 			continue;
1705 		if (((drvp->drive_flags & DRIVE_DMA) == 0 &&
1706 		    (drvp->drive_flags & DRIVE_UDMA) == 0))
1707 			goto pio;
1708 
1709 		if (sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801AA_IDE ||
1710 		    sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801AB_IDE ||
1711 		    sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801BA_IDE ||
1712 		    sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801BAM_IDE) {
1713 			ideconf |= PIIX_CONFIG_PINGPONG;
1714 		}
1715 		if (sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801BA_IDE ||
1716 		    sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801BAM_IDE) {
1717 			/* setup Ultra/100 */
1718 			if (drvp->UDMA_mode > 2 &&
1719 			    (ideconf & PIIX_CONFIG_CR(channel, drive)) == 0)
1720 				drvp->UDMA_mode = 2;
1721 			if (drvp->UDMA_mode > 4) {
1722 				ideconf |= PIIX_CONFIG_UDMA100(channel, drive);
1723 			} else {
1724 				ideconf &= ~PIIX_CONFIG_UDMA100(channel, drive);
1725 				if (drvp->UDMA_mode > 2) {
1726 					ideconf |= PIIX_CONFIG_UDMA66(channel,
1727 					    drive);
1728 				} else {
1729 					ideconf &= ~PIIX_CONFIG_UDMA66(channel,
1730 					    drive);
1731 				}
1732 			}
1733 		}
1734 		if (sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801AA_IDE) {
1735 			/* setup Ultra/66 */
1736 			if (drvp->UDMA_mode > 2 &&
1737 			    (ideconf & PIIX_CONFIG_CR(channel, drive)) == 0)
1738 				drvp->UDMA_mode = 2;
1739 			if (drvp->UDMA_mode > 2)
1740 				ideconf |= PIIX_CONFIG_UDMA66(channel, drive);
1741 			else
1742 				ideconf &= ~PIIX_CONFIG_UDMA66(channel, drive);
1743 		}
1744 		if ((chp->wdc->cap & WDC_CAPABILITY_UDMA) &&
1745 		    (drvp->drive_flags & DRIVE_UDMA)) {
1746 			/* use Ultra/DMA */
1747 			drvp->drive_flags &= ~DRIVE_DMA;
1748 			udmareg |= PIIX_UDMACTL_DRV_EN( channel, drive);
1749 			udmareg |= PIIX_UDMATIM_SET(
1750 			    piix4_sct_udma[drvp->UDMA_mode], channel, drive);
1751 		} else {
1752 			/* use Multiword DMA */
1753 			drvp->drive_flags &= ~DRIVE_UDMA;
1754 			if (drive == 0) {
1755 				idetim |= piix_setup_idetim_timings(
1756 				    drvp->DMA_mode, 1, channel);
1757 			} else {
1758 				sidetim |= piix_setup_sidetim_timings(
1759 					drvp->DMA_mode, 1, channel);
1760 				idetim =PIIX_IDETIM_SET(idetim,
1761 				    PIIX_IDETIM_SITRE, channel);
1762 			}
1763 		}
1764 		idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
1765 
1766 pio:		/* use PIO mode */
1767 		idetim |= piix_setup_idetim_drvs(drvp);
1768 		if (drive == 0) {
1769 			idetim |= piix_setup_idetim_timings(
1770 			    drvp->PIO_mode, 0, channel);
1771 		} else {
1772 			sidetim |= piix_setup_sidetim_timings(
1773 				drvp->PIO_mode, 0, channel);
1774 			idetim =PIIX_IDETIM_SET(idetim,
1775 			    PIIX_IDETIM_SITRE, channel);
1776 		}
1777 	}
1778 	if (idedma_ctl != 0) {
1779 		/* Add software bits in status register */
1780 		bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
1781 		    IDEDMA_CTL + (IDEDMA_SCH_OFFSET * channel),
1782 		    idedma_ctl);
1783 	}
1784 	pci_conf_write(sc->sc_pc, sc->sc_tag, PIIX_IDETIM, idetim);
1785 	pci_conf_write(sc->sc_pc, sc->sc_tag, PIIX_SIDETIM, sidetim);
1786 	pci_conf_write(sc->sc_pc, sc->sc_tag, PIIX_UDMAREG, udmareg);
1787 	pci_conf_write(sc->sc_pc, sc->sc_tag, PIIX_CONFIG, ideconf);
1788 	pciide_print_modes(cp);
1789 }
1790 
1791 
1792 /* setup ISP and RTC fields, based on mode */
1793 static u_int32_t
1794 piix_setup_idetim_timings(mode, dma, channel)
1795 	u_int8_t mode;
1796 	u_int8_t dma;
1797 	u_int8_t channel;
1798 {
1799 
1800 	if (dma)
1801 		return PIIX_IDETIM_SET(0,
1802 		    PIIX_IDETIM_ISP_SET(piix_isp_dma[mode]) |
1803 		    PIIX_IDETIM_RTC_SET(piix_rtc_dma[mode]),
1804 		    channel);
1805 	else
1806 		return PIIX_IDETIM_SET(0,
1807 		    PIIX_IDETIM_ISP_SET(piix_isp_pio[mode]) |
1808 		    PIIX_IDETIM_RTC_SET(piix_rtc_pio[mode]),
1809 		    channel);
1810 }
1811 
1812 /* setup DTE, PPE, IE and TIME field based on PIO mode */
1813 static u_int32_t
1814 piix_setup_idetim_drvs(drvp)
1815 	struct ata_drive_datas *drvp;
1816 {
1817 	u_int32_t ret = 0;
1818 	struct channel_softc *chp = drvp->chnl_softc;
1819 	u_int8_t channel = chp->channel;
1820 	u_int8_t drive = drvp->drive;
1821 
1822 	/*
1823 	 * If drive is using UDMA, timings setups are independant
1824 	 * So just check DMA and PIO here.
1825 	 */
1826 	if (drvp->drive_flags & DRIVE_DMA) {
1827 		/* if mode = DMA mode 0, use compatible timings */
1828 		if ((drvp->drive_flags & DRIVE_DMA) &&
1829 		    drvp->DMA_mode == 0) {
1830 			drvp->PIO_mode = 0;
1831 			return ret;
1832 		}
1833 		ret = PIIX_IDETIM_SET(ret, PIIX_IDETIM_TIME(drive), channel);
1834 		/*
1835 		 * PIO and DMA timings are the same, use fast timings for PIO
1836 		 * too, else use compat timings.
1837 		 */
1838 		if ((piix_isp_pio[drvp->PIO_mode] !=
1839 		    piix_isp_dma[drvp->DMA_mode]) ||
1840 		    (piix_rtc_pio[drvp->PIO_mode] !=
1841 		    piix_rtc_dma[drvp->DMA_mode]))
1842 			drvp->PIO_mode = 0;
1843 		/* if PIO mode <= 2, use compat timings for PIO */
1844 		if (drvp->PIO_mode <= 2) {
1845 			ret = PIIX_IDETIM_SET(ret, PIIX_IDETIM_DTE(drive),
1846 			    channel);
1847 			return ret;
1848 		}
1849 	}
1850 
1851 	/*
1852 	 * Now setup PIO modes. If mode < 2, use compat timings.
1853 	 * Else enable fast timings. Enable IORDY and prefetch/post
1854 	 * if PIO mode >= 3.
1855 	 */
1856 
1857 	if (drvp->PIO_mode < 2)
1858 		return ret;
1859 
1860 	ret = PIIX_IDETIM_SET(ret, PIIX_IDETIM_TIME(drive), channel);
1861 	if (drvp->PIO_mode >= 3) {
1862 		ret = PIIX_IDETIM_SET(ret, PIIX_IDETIM_IE(drive), channel);
1863 		ret = PIIX_IDETIM_SET(ret, PIIX_IDETIM_PPE(drive), channel);
1864 	}
1865 	return ret;
1866 }
1867 
1868 /* setup values in SIDETIM registers, based on mode */
1869 static u_int32_t
1870 piix_setup_sidetim_timings(mode, dma, channel)
1871 	u_int8_t mode;
1872 	u_int8_t dma;
1873 	u_int8_t channel;
1874 {
1875 	if (dma)
1876 		return PIIX_SIDETIM_ISP_SET(piix_isp_dma[mode], channel) |
1877 		    PIIX_SIDETIM_RTC_SET(piix_rtc_dma[mode], channel);
1878 	else
1879 		return PIIX_SIDETIM_ISP_SET(piix_isp_pio[mode], channel) |
1880 		    PIIX_SIDETIM_RTC_SET(piix_rtc_pio[mode], channel);
1881 }
1882 
1883 void
1884 amd7x6_chip_map(sc, pa)
1885 	struct pciide_softc *sc;
1886 	struct pci_attach_args *pa;
1887 {
1888 	struct pciide_channel *cp;
1889 	pcireg_t interface = PCI_INTERFACE(pa->pa_class);
1890 	int channel;
1891 	pcireg_t chanenable;
1892 	bus_size_t cmdsize, ctlsize;
1893 
1894 	if (pciide_chipen(sc, pa) == 0)
1895 		return;
1896 	printf("%s: bus-master DMA support present",
1897 	    sc->sc_wdcdev.sc_dev.dv_xname);
1898 	pciide_mapreg_dma(sc, pa);
1899 	printf("\n");
1900 	sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
1901 	    WDC_CAPABILITY_MODE;
1902 	if (sc->sc_dma_ok) {
1903 		sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_UDMA;
1904 		sc->sc_wdcdev.cap |= WDC_CAPABILITY_IRQACK;
1905 		sc->sc_wdcdev.irqack = pciide_irqack;
1906 	}
1907 	sc->sc_wdcdev.PIO_cap = 4;
1908 	sc->sc_wdcdev.DMA_cap = 2;
1909 
1910 	if (sc->sc_pp->ide_product == PCI_PRODUCT_AMD_PBC766_IDE)
1911 		sc->sc_wdcdev.UDMA_cap = 5;
1912 	else
1913 		sc->sc_wdcdev.UDMA_cap = 4;
1914 	sc->sc_wdcdev.set_modes = amd7x6_setup_channel;
1915 	sc->sc_wdcdev.channels = sc->wdc_chanarray;
1916 	sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
1917 	chanenable = pci_conf_read(sc->sc_pc, sc->sc_tag, AMD7X6_CHANSTATUS_EN);
1918 
1919 	WDCDEBUG_PRINT(("amd7x6_chip_map: Channel enable=0x%x\n", chanenable),
1920 	    DEBUG_PROBE);
1921 	for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
1922 		cp = &sc->pciide_channels[channel];
1923 		if (pciide_chansetup(sc, channel, interface) == 0)
1924 			continue;
1925 
1926 		if ((chanenable & AMD7X6_CHAN_EN(channel)) == 0) {
1927 			printf("%s: %s channel ignored (disabled)\n",
1928 			    sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
1929 			continue;
1930 		}
1931 		pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize,
1932 		    pciide_pci_intr);
1933 
1934 		if (pciide_chan_candisable(cp))
1935 			chanenable &= ~AMD7X6_CHAN_EN(channel);
1936 		pciide_map_compat_intr(pa, cp, channel, interface);
1937 		if (cp->hw_ok == 0)
1938 			continue;
1939 
1940 		amd7x6_setup_channel(&cp->wdc_channel);
1941 	}
1942 	pci_conf_write(sc->sc_pc, sc->sc_tag, AMD7X6_CHANSTATUS_EN,
1943 	    chanenable);
1944 	return;
1945 }
1946 
1947 void
1948 amd7x6_setup_channel(chp)
1949 	struct channel_softc *chp;
1950 {
1951 	u_int32_t udmatim_reg, datatim_reg;
1952 	u_int8_t idedma_ctl;
1953 	int mode, drive;
1954 	struct ata_drive_datas *drvp;
1955 	struct pciide_channel *cp = (struct pciide_channel*)chp;
1956 	struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
1957 #ifndef PCIIDE_AMD756_ENABLEDMA
1958 	int rev = PCI_REVISION(
1959 	    pci_conf_read(sc->sc_pc, sc->sc_tag, PCI_CLASS_REG));
1960 #endif
1961 
1962 	idedma_ctl = 0;
1963 	datatim_reg = pci_conf_read(sc->sc_pc, sc->sc_tag, AMD7X6_DATATIM);
1964 	udmatim_reg = pci_conf_read(sc->sc_pc, sc->sc_tag, AMD7X6_UDMA);
1965 	datatim_reg &= ~AMD7X6_DATATIM_MASK(chp->channel);
1966 	udmatim_reg &= ~AMD7X6_UDMA_MASK(chp->channel);
1967 
1968 	/* setup DMA if needed */
1969 	pciide_channel_dma_setup(cp);
1970 
1971 	for (drive = 0; drive < 2; drive++) {
1972 		drvp = &chp->ch_drive[drive];
1973 		/* If no drive, skip */
1974 		if ((drvp->drive_flags & DRIVE) == 0)
1975 			continue;
1976 		/* add timing values, setup DMA if needed */
1977 		if (((drvp->drive_flags & DRIVE_DMA) == 0 &&
1978 		    (drvp->drive_flags & DRIVE_UDMA) == 0)) {
1979 			mode = drvp->PIO_mode;
1980 			goto pio;
1981 		}
1982 		if ((chp->wdc->cap & WDC_CAPABILITY_UDMA) &&
1983 		    (drvp->drive_flags & DRIVE_UDMA)) {
1984 			/* use Ultra/DMA */
1985 			drvp->drive_flags &= ~DRIVE_DMA;
1986 			udmatim_reg |= AMD7X6_UDMA_EN(chp->channel, drive) |
1987 			    AMD7X6_UDMA_EN_MTH(chp->channel, drive) |
1988 			    AMD7X6_UDMA_TIME(chp->channel, drive,
1989 				amd7x6_udma_tim[drvp->UDMA_mode]);
1990 			/* can use PIO timings, MW DMA unused */
1991 			mode = drvp->PIO_mode;
1992 		} else {
1993 			/* use Multiword DMA, but only if revision is OK */
1994 			drvp->drive_flags &= ~DRIVE_UDMA;
1995 #ifndef PCIIDE_AMD756_ENABLEDMA
1996 			/*
1997 			 * The workaround doesn't seem to be necessary
1998 			 * with all drives, so it can be disabled by
1999 			 * PCIIDE_AMD756_ENABLEDMA. It causes a hard hang if
2000 			 * triggered.
2001 			 */
2002 			if (sc->sc_pp->ide_product ==
2003 			      PCI_PRODUCT_AMD_PBC756_IDE &&
2004 			    AMD756_CHIPREV_DISABLEDMA(rev)) {
2005 				printf("%s:%d:%d: multi-word DMA disabled due "
2006 				    "to chip revision\n",
2007 				    sc->sc_wdcdev.sc_dev.dv_xname,
2008 				    chp->channel, drive);
2009 				mode = drvp->PIO_mode;
2010 				drvp->drive_flags &= ~DRIVE_DMA;
2011 				goto pio;
2012 			}
2013 #endif
2014 			/* mode = min(pio, dma+2) */
2015 			if (drvp->PIO_mode <= (drvp->DMA_mode +2))
2016 				mode = drvp->PIO_mode;
2017 			else
2018 				mode = drvp->DMA_mode + 2;
2019 		}
2020 		idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
2021 
2022 pio:		/* setup PIO mode */
2023 		if (mode <= 2) {
2024 			drvp->DMA_mode = 0;
2025 			drvp->PIO_mode = 0;
2026 			mode = 0;
2027 		} else {
2028 			drvp->PIO_mode = mode;
2029 			drvp->DMA_mode = mode - 2;
2030 		}
2031 		datatim_reg |=
2032 		    AMD7X6_DATATIM_PULSE(chp->channel, drive,
2033 			amd7x6_pio_set[mode]) |
2034 		    AMD7X6_DATATIM_RECOV(chp->channel, drive,
2035 			amd7x6_pio_rec[mode]);
2036 	}
2037 	if (idedma_ctl != 0) {
2038 		/* Add software bits in status register */
2039 		bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
2040 		    IDEDMA_CTL + (IDEDMA_SCH_OFFSET * chp->channel),
2041 		    idedma_ctl);
2042 	}
2043 	pciide_print_modes(cp);
2044 	pci_conf_write(sc->sc_pc, sc->sc_tag, AMD7X6_DATATIM, datatim_reg);
2045 	pci_conf_write(sc->sc_pc, sc->sc_tag, AMD7X6_UDMA, udmatim_reg);
2046 }
2047 
2048 void
2049 apollo_chip_map(sc, pa)
2050 	struct pciide_softc *sc;
2051 	struct pci_attach_args *pa;
2052 {
2053 	struct pciide_channel *cp;
2054 	pcireg_t interface = PCI_INTERFACE(pa->pa_class);
2055 	int channel;
2056 	u_int32_t ideconf;
2057 	bus_size_t cmdsize, ctlsize;
2058 	pcitag_t pcib_tag;
2059 	pcireg_t pcib_id, pcib_class;
2060 
2061 	if (pciide_chipen(sc, pa) == 0)
2062 		return;
2063 	/* get a PCI tag for the ISA bridge (function 0 of the same device) */
2064 	pcib_tag = pci_make_tag(pa->pa_pc, pa->pa_bus, pa->pa_device, 0);
2065 	/* and read ID and rev of the ISA bridge */
2066 	pcib_id = pci_conf_read(sc->sc_pc, pcib_tag, PCI_ID_REG);
2067 	pcib_class = pci_conf_read(sc->sc_pc, pcib_tag, PCI_CLASS_REG);
2068 	printf(": VIA Technologies ");
2069 	switch (PCI_PRODUCT(pcib_id)) {
2070 	case PCI_PRODUCT_VIATECH_VT82C586_ISA:
2071 		printf("VT82C586 (Apollo VP) ");
2072 		if(PCI_REVISION(pcib_class) >= 0x02) {
2073 			printf("ATA33 controller\n");
2074 			sc->sc_wdcdev.UDMA_cap = 2;
2075 		} else {
2076 			printf("controller\n");
2077 			sc->sc_wdcdev.UDMA_cap = 0;
2078 		}
2079 		break;
2080 	case PCI_PRODUCT_VIATECH_VT82C596A:
2081 		printf("VT82C596A (Apollo Pro) ");
2082 		if (PCI_REVISION(pcib_class) >= 0x12) {
2083 			printf("ATA66 controller\n");
2084 			sc->sc_wdcdev.UDMA_cap = 4;
2085 		} else {
2086 			printf("ATA33 controller\n");
2087 			sc->sc_wdcdev.UDMA_cap = 2;
2088 		}
2089 		break;
2090 	case PCI_PRODUCT_VIATECH_VT82C686A_ISA:
2091 		printf("VT82C686A (Apollo KX133) ");
2092 		if (PCI_REVISION(pcib_class) >= 0x40) {
2093 			printf("ATA100 controller\n");
2094 			sc->sc_wdcdev.UDMA_cap = 5;
2095 		} else {
2096 			printf("ATA66 controller\n");
2097 			sc->sc_wdcdev.UDMA_cap = 4;
2098 		}
2099 		break;
2100 	case PCI_PRODUCT_VIATECH_VT8233:
2101 		printf("VT8233 ATA100 controller\n");
2102 		sc->sc_wdcdev.UDMA_cap = 5;
2103 		break;
2104 	default:
2105 		printf("unknown ATA controller\n");
2106 		sc->sc_wdcdev.UDMA_cap = 0;
2107 	}
2108 
2109 	printf("%s: bus-master DMA support present",
2110 	    sc->sc_wdcdev.sc_dev.dv_xname);
2111 	pciide_mapreg_dma(sc, pa);
2112 	printf("\n");
2113 	sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
2114 	    WDC_CAPABILITY_MODE;
2115 	if (sc->sc_dma_ok) {
2116 		sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_IRQACK;
2117 		sc->sc_wdcdev.irqack = pciide_irqack;
2118 		if (sc->sc_wdcdev.UDMA_cap > 0)
2119 			sc->sc_wdcdev.cap |= WDC_CAPABILITY_UDMA;
2120 	}
2121 	sc->sc_wdcdev.PIO_cap = 4;
2122 	sc->sc_wdcdev.DMA_cap = 2;
2123 	sc->sc_wdcdev.set_modes = apollo_setup_channel;
2124 	sc->sc_wdcdev.channels = sc->wdc_chanarray;
2125 	sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
2126 
2127 	WDCDEBUG_PRINT(("apollo_chip_map: old APO_IDECONF=0x%x, "
2128 	    "APO_CTLMISC=0x%x, APO_DATATIM=0x%x, APO_UDMA=0x%x\n",
2129 	    pci_conf_read(sc->sc_pc, sc->sc_tag, APO_IDECONF),
2130 	    pci_conf_read(sc->sc_pc, sc->sc_tag, APO_CTLMISC),
2131 	    pci_conf_read(sc->sc_pc, sc->sc_tag, APO_DATATIM),
2132 	    pci_conf_read(sc->sc_pc, sc->sc_tag, APO_UDMA)),
2133 	    DEBUG_PROBE);
2134 
2135 	for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
2136 		cp = &sc->pciide_channels[channel];
2137 		if (pciide_chansetup(sc, channel, interface) == 0)
2138 			continue;
2139 
2140 		ideconf = pci_conf_read(sc->sc_pc, sc->sc_tag, APO_IDECONF);
2141 		if ((ideconf & APO_IDECONF_EN(channel)) == 0) {
2142 			printf("%s: %s channel ignored (disabled)\n",
2143 			    sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
2144 			continue;
2145 		}
2146 		pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize,
2147 		    pciide_pci_intr);
2148 		if (cp->hw_ok == 0)
2149 			continue;
2150 		if (pciide_chan_candisable(cp)) {
2151 			ideconf &= ~APO_IDECONF_EN(channel);
2152 			pci_conf_write(sc->sc_pc, sc->sc_tag, APO_IDECONF,
2153 			    ideconf);
2154 		}
2155 		pciide_map_compat_intr(pa, cp, channel, interface);
2156 
2157 		if (cp->hw_ok == 0)
2158 			continue;
2159 		apollo_setup_channel(&sc->pciide_channels[channel].wdc_channel);
2160 	}
2161 	WDCDEBUG_PRINT(("apollo_chip_map: APO_DATATIM=0x%x, APO_UDMA=0x%x\n",
2162 	    pci_conf_read(sc->sc_pc, sc->sc_tag, APO_DATATIM),
2163 	    pci_conf_read(sc->sc_pc, sc->sc_tag, APO_UDMA)), DEBUG_PROBE);
2164 }
2165 
2166 void
2167 apollo_setup_channel(chp)
2168 	struct channel_softc *chp;
2169 {
2170 	u_int32_t udmatim_reg, datatim_reg;
2171 	u_int8_t idedma_ctl;
2172 	int mode, drive;
2173 	struct ata_drive_datas *drvp;
2174 	struct pciide_channel *cp = (struct pciide_channel*)chp;
2175 	struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
2176 
2177 	idedma_ctl = 0;
2178 	datatim_reg = pci_conf_read(sc->sc_pc, sc->sc_tag, APO_DATATIM);
2179 	udmatim_reg = pci_conf_read(sc->sc_pc, sc->sc_tag, APO_UDMA);
2180 	datatim_reg &= ~APO_DATATIM_MASK(chp->channel);
2181 	udmatim_reg &= ~APO_UDMA_MASK(chp->channel);
2182 
2183 	/* setup DMA if needed */
2184 	pciide_channel_dma_setup(cp);
2185 
2186 	for (drive = 0; drive < 2; drive++) {
2187 		drvp = &chp->ch_drive[drive];
2188 		/* If no drive, skip */
2189 		if ((drvp->drive_flags & DRIVE) == 0)
2190 			continue;
2191 		/* add timing values, setup DMA if needed */
2192 		if (((drvp->drive_flags & DRIVE_DMA) == 0 &&
2193 		    (drvp->drive_flags & DRIVE_UDMA) == 0)) {
2194 			mode = drvp->PIO_mode;
2195 			goto pio;
2196 		}
2197 		if ((chp->wdc->cap & WDC_CAPABILITY_UDMA) &&
2198 		    (drvp->drive_flags & DRIVE_UDMA)) {
2199 			/* use Ultra/DMA */
2200 			drvp->drive_flags &= ~DRIVE_DMA;
2201 			udmatim_reg |= APO_UDMA_EN(chp->channel, drive) |
2202 			    APO_UDMA_EN_MTH(chp->channel, drive);
2203 			if (sc->sc_wdcdev.UDMA_cap == 5) {
2204 				/* 686b */
2205 				udmatim_reg |= APO_UDMA_CLK66(chp->channel);
2206 				udmatim_reg |= APO_UDMA_TIME(chp->channel,
2207 				    drive, apollo_udma100_tim[drvp->UDMA_mode]);
2208 			} else if (sc->sc_wdcdev.UDMA_cap == 4) {
2209 				/* 596b or 686a */
2210 				udmatim_reg |= APO_UDMA_CLK66(chp->channel);
2211 				udmatim_reg |= APO_UDMA_TIME(chp->channel,
2212 				    drive, apollo_udma66_tim[drvp->UDMA_mode]);
2213 			} else {
2214 				/* 596a or 586b */
2215 				udmatim_reg |= APO_UDMA_TIME(chp->channel,
2216 				    drive, apollo_udma33_tim[drvp->UDMA_mode]);
2217 			}
2218 			/* can use PIO timings, MW DMA unused */
2219 			mode = drvp->PIO_mode;
2220 		} else {
2221 			/* use Multiword DMA */
2222 			drvp->drive_flags &= ~DRIVE_UDMA;
2223 			/* mode = min(pio, dma+2) */
2224 			if (drvp->PIO_mode <= (drvp->DMA_mode +2))
2225 				mode = drvp->PIO_mode;
2226 			else
2227 				mode = drvp->DMA_mode + 2;
2228 		}
2229 		idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
2230 
2231 pio:		/* setup PIO mode */
2232 		if (mode <= 2) {
2233 			drvp->DMA_mode = 0;
2234 			drvp->PIO_mode = 0;
2235 			mode = 0;
2236 		} else {
2237 			drvp->PIO_mode = mode;
2238 			drvp->DMA_mode = mode - 2;
2239 		}
2240 		datatim_reg |=
2241 		    APO_DATATIM_PULSE(chp->channel, drive,
2242 			apollo_pio_set[mode]) |
2243 		    APO_DATATIM_RECOV(chp->channel, drive,
2244 			apollo_pio_rec[mode]);
2245 	}
2246 	if (idedma_ctl != 0) {
2247 		/* Add software bits in status register */
2248 		bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
2249 		    IDEDMA_CTL + (IDEDMA_SCH_OFFSET * chp->channel),
2250 		    idedma_ctl);
2251 	}
2252 	pciide_print_modes(cp);
2253 	pci_conf_write(sc->sc_pc, sc->sc_tag, APO_DATATIM, datatim_reg);
2254 	pci_conf_write(sc->sc_pc, sc->sc_tag, APO_UDMA, udmatim_reg);
2255 }
2256 
2257 void
2258 cmd_channel_map(pa, sc, channel)
2259 	struct pci_attach_args *pa;
2260 	struct pciide_softc *sc;
2261 	int channel;
2262 {
2263 	struct pciide_channel *cp = &sc->pciide_channels[channel];
2264 	bus_size_t cmdsize, ctlsize;
2265 	u_int8_t ctrl = pciide_pci_read(sc->sc_pc, sc->sc_tag, CMD_CTRL);
2266 	int interface;
2267 
2268 	/*
2269 	 * The 0648/0649 can be told to identify as a RAID controller.
2270 	 * In this case, we have to fake interface
2271 	 */
2272 	if (PCI_SUBCLASS(pa->pa_class) != PCI_SUBCLASS_MASS_STORAGE_IDE) {
2273 		interface = PCIIDE_INTERFACE_SETTABLE(0) |
2274 		    PCIIDE_INTERFACE_SETTABLE(1);
2275 		if (pciide_pci_read(pa->pa_pc, pa->pa_tag, CMD_CONF) &
2276 		    CMD_CONF_DSA1)
2277 			interface |= PCIIDE_INTERFACE_PCI(0) |
2278 			    PCIIDE_INTERFACE_PCI(1);
2279 	} else {
2280 		interface = PCI_INTERFACE(pa->pa_class);
2281 	}
2282 
2283 	sc->wdc_chanarray[channel] = &cp->wdc_channel;
2284 	cp->name = PCIIDE_CHANNEL_NAME(channel);
2285 	cp->wdc_channel.channel = channel;
2286 	cp->wdc_channel.wdc = &sc->sc_wdcdev;
2287 
2288 	if (channel > 0) {
2289 		cp->wdc_channel.ch_queue =
2290 		    sc->pciide_channels[0].wdc_channel.ch_queue;
2291 	} else {
2292 		cp->wdc_channel.ch_queue =
2293 		    malloc(sizeof(struct channel_queue), M_DEVBUF, M_NOWAIT);
2294 	}
2295 	if (cp->wdc_channel.ch_queue == NULL) {
2296 		printf("%s %s channel: "
2297 		    "can't allocate memory for command queue",
2298 		    sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
2299 		    return;
2300 	}
2301 
2302 	printf("%s: %s channel %s to %s mode\n",
2303 	    sc->sc_wdcdev.sc_dev.dv_xname, cp->name,
2304 	    (interface & PCIIDE_INTERFACE_SETTABLE(channel)) ?
2305 	    "configured" : "wired",
2306 	    (interface & PCIIDE_INTERFACE_PCI(channel)) ?
2307 	    "native-PCI" : "compatibility");
2308 
2309 	/*
2310 	 * with a CMD PCI64x, if we get here, the first channel is enabled:
2311 	 * there's no way to disable the first channel without disabling
2312 	 * the whole device
2313 	 */
2314 	if (channel != 0 && (ctrl & CMD_CTRL_2PORT) == 0) {
2315 		printf("%s: %s channel ignored (disabled)\n",
2316 		    sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
2317 		return;
2318 	}
2319 
2320 	pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize, cmd_pci_intr);
2321 	if (cp->hw_ok == 0)
2322 		return;
2323 	if (channel == 1) {
2324 		if (pciide_chan_candisable(cp)) {
2325 			ctrl &= ~CMD_CTRL_2PORT;
2326 			pciide_pci_write(pa->pa_pc, pa->pa_tag,
2327 			    CMD_CTRL, ctrl);
2328 		}
2329 	}
2330 	pciide_map_compat_intr(pa, cp, channel, interface);
2331 }
2332 
2333 int
2334 cmd_pci_intr(arg)
2335 	void *arg;
2336 {
2337 	struct pciide_softc *sc = arg;
2338 	struct pciide_channel *cp;
2339 	struct channel_softc *wdc_cp;
2340 	int i, rv, crv;
2341 	u_int32_t priirq, secirq;
2342 
2343 	rv = 0;
2344 	priirq = pciide_pci_read(sc->sc_pc, sc->sc_tag, CMD_CONF);
2345 	secirq = pciide_pci_read(sc->sc_pc, sc->sc_tag, CMD_ARTTIM23);
2346 	for (i = 0; i < sc->sc_wdcdev.nchannels; i++) {
2347 		cp = &sc->pciide_channels[i];
2348 		wdc_cp = &cp->wdc_channel;
2349 		/* If a compat channel skip. */
2350 		if (cp->compat)
2351 			continue;
2352 		if ((i == 0 && (priirq & CMD_CONF_DRV0_INTR)) ||
2353 		    (i == 1 && (secirq & CMD_ARTTIM23_IRQ))) {
2354 			crv = wdcintr(wdc_cp);
2355 			if (crv == 0)
2356 				printf("%s:%d: bogus intr\n",
2357 				    sc->sc_wdcdev.sc_dev.dv_xname, i);
2358 			else
2359 				rv = 1;
2360 		}
2361 	}
2362 	return rv;
2363 }
2364 
2365 void
2366 cmd_chip_map(sc, pa)
2367 	struct pciide_softc *sc;
2368 	struct pci_attach_args *pa;
2369 {
2370 	int channel;
2371 
2372 	/*
2373 	 * For a CMD PCI064x, the use of PCI_COMMAND_IO_ENABLE
2374 	 * and base adresses registers can be disabled at
2375 	 * hardware level. In this case, the device is wired
2376 	 * in compat mode and its first channel is always enabled,
2377 	 * but we can't rely on PCI_COMMAND_IO_ENABLE.
2378 	 * In fact, it seems that the first channel of the CMD PCI0640
2379 	 * can't be disabled.
2380 	 */
2381 
2382 #ifdef PCIIDE_CMD064x_DISABLE
2383 	if (pciide_chipen(sc, pa) == 0)
2384 		return;
2385 #endif
2386 
2387 	printf("%s: hardware does not support DMA\n",
2388 	    sc->sc_wdcdev.sc_dev.dv_xname);
2389 	sc->sc_dma_ok = 0;
2390 
2391 	sc->sc_wdcdev.channels = sc->wdc_chanarray;
2392 	sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
2393 	sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16;
2394 
2395 	for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
2396 		cmd_channel_map(pa, sc, channel);
2397 	}
2398 }
2399 
2400 void
2401 cmd0643_9_chip_map(sc, pa)
2402 	struct pciide_softc *sc;
2403 	struct pci_attach_args *pa;
2404 {
2405 	struct pciide_channel *cp;
2406 	int channel;
2407 	int rev = PCI_REVISION(
2408 	    pci_conf_read(sc->sc_pc, sc->sc_tag, PCI_CLASS_REG));
2409 
2410 	/*
2411 	 * For a CMD PCI064x, the use of PCI_COMMAND_IO_ENABLE
2412 	 * and base adresses registers can be disabled at
2413 	 * hardware level. In this case, the device is wired
2414 	 * in compat mode and its first channel is always enabled,
2415 	 * but we can't rely on PCI_COMMAND_IO_ENABLE.
2416 	 * In fact, it seems that the first channel of the CMD PCI0640
2417 	 * can't be disabled.
2418 	 */
2419 
2420 #ifdef PCIIDE_CMD064x_DISABLE
2421 	if (pciide_chipen(sc, pa) == 0)
2422 		return;
2423 #endif
2424 	printf("%s: bus-master DMA support present",
2425 	    sc->sc_wdcdev.sc_dev.dv_xname);
2426 	pciide_mapreg_dma(sc, pa);
2427 	printf("\n");
2428 	sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
2429 	    WDC_CAPABILITY_MODE;
2430 	if (sc->sc_dma_ok) {
2431 		sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_IRQACK;
2432 		switch (sc->sc_pp->ide_product) {
2433 		case PCI_PRODUCT_CMDTECH_649:
2434 		case PCI_PRODUCT_CMDTECH_648:
2435 			sc->sc_wdcdev.cap |= WDC_CAPABILITY_UDMA;
2436 			sc->sc_wdcdev.UDMA_cap = 4;
2437 			sc->sc_wdcdev.irqack = cmd646_9_irqack;
2438 			break;
2439 		case PCI_PRODUCT_CMDTECH_646:
2440 			if (rev >= CMD0646U2_REV) {
2441 				sc->sc_wdcdev.cap |= WDC_CAPABILITY_UDMA;
2442 				sc->sc_wdcdev.UDMA_cap = 2;
2443 			} else if (rev >= CMD0646U_REV) {
2444 			/*
2445 			 * Linux's driver claims that the 646U is broken
2446 			 * with UDMA. Only enable it if we know what we're
2447 			 * doing
2448 			 */
2449 #ifdef PCIIDE_CMD0646U_ENABLEUDMA
2450 				sc->sc_wdcdev.cap |= WDC_CAPABILITY_UDMA;
2451 				sc->sc_wdcdev.UDMA_cap = 2;
2452 #endif
2453 				/* explicitely disable UDMA */
2454 				pciide_pci_write(sc->sc_pc, sc->sc_tag,
2455 				    CMD_UDMATIM(0), 0);
2456 				pciide_pci_write(sc->sc_pc, sc->sc_tag,
2457 				    CMD_UDMATIM(1), 0);
2458 			}
2459 			sc->sc_wdcdev.irqack = cmd646_9_irqack;
2460 			break;
2461 		default:
2462 			sc->sc_wdcdev.irqack = pciide_irqack;
2463 		}
2464 	}
2465 
2466 	sc->sc_wdcdev.channels = sc->wdc_chanarray;
2467 	sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
2468 	sc->sc_wdcdev.PIO_cap = 4;
2469 	sc->sc_wdcdev.DMA_cap = 2;
2470 	sc->sc_wdcdev.set_modes = cmd0643_9_setup_channel;
2471 
2472 	WDCDEBUG_PRINT(("cmd0643_9_chip_map: old timings reg 0x%x 0x%x\n",
2473 		pci_conf_read(sc->sc_pc, sc->sc_tag, 0x54),
2474 		pci_conf_read(sc->sc_pc, sc->sc_tag, 0x58)),
2475 		DEBUG_PROBE);
2476 
2477 	for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
2478 		cp = &sc->pciide_channels[channel];
2479 		cmd_channel_map(pa, sc, channel);
2480 		if (cp->hw_ok == 0)
2481 			continue;
2482 		cmd0643_9_setup_channel(&cp->wdc_channel);
2483 	}
2484 	/*
2485 	 * note - this also makes sure we clear the irq disable and reset
2486 	 * bits
2487 	 */
2488 	pciide_pci_write(sc->sc_pc, sc->sc_tag, CMD_DMA_MODE, CMD_DMA_MULTIPLE);
2489 	WDCDEBUG_PRINT(("cmd0643_9_chip_map: timings reg now 0x%x 0x%x\n",
2490 	    pci_conf_read(sc->sc_pc, sc->sc_tag, 0x54),
2491 	    pci_conf_read(sc->sc_pc, sc->sc_tag, 0x58)),
2492 	    DEBUG_PROBE);
2493 }
2494 
2495 void
2496 cmd0643_9_setup_channel(chp)
2497 	struct channel_softc *chp;
2498 {
2499 	struct ata_drive_datas *drvp;
2500 	u_int8_t tim;
2501 	u_int32_t idedma_ctl, udma_reg;
2502 	int drive;
2503 	struct pciide_channel *cp = (struct pciide_channel*)chp;
2504 	struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
2505 
2506 	idedma_ctl = 0;
2507 	/* setup DMA if needed */
2508 	pciide_channel_dma_setup(cp);
2509 
2510 	for (drive = 0; drive < 2; drive++) {
2511 		drvp = &chp->ch_drive[drive];
2512 		/* If no drive, skip */
2513 		if ((drvp->drive_flags & DRIVE) == 0)
2514 			continue;
2515 		/* add timing values, setup DMA if needed */
2516 		tim = cmd0643_9_data_tim_pio[drvp->PIO_mode];
2517 		if (drvp->drive_flags & (DRIVE_DMA | DRIVE_UDMA)) {
2518 			if (drvp->drive_flags & DRIVE_UDMA) {
2519 				/* UltraDMA on a 646U2, 0648 or 0649 */
2520 				drvp->drive_flags &= ~DRIVE_DMA;
2521 				udma_reg = pciide_pci_read(sc->sc_pc,
2522 				    sc->sc_tag, CMD_UDMATIM(chp->channel));
2523 				if (drvp->UDMA_mode > 2 &&
2524 				    (pciide_pci_read(sc->sc_pc, sc->sc_tag,
2525 				    CMD_BICSR) &
2526 				    CMD_BICSR_80(chp->channel)) == 0)
2527 					drvp->UDMA_mode = 2;
2528 				if (drvp->UDMA_mode > 2)
2529 					udma_reg &= ~CMD_UDMATIM_UDMA33(drive);
2530 				else if (sc->sc_wdcdev.UDMA_cap > 2)
2531 					udma_reg |= CMD_UDMATIM_UDMA33(drive);
2532 				udma_reg |= CMD_UDMATIM_UDMA(drive);
2533 				udma_reg &= ~(CMD_UDMATIM_TIM_MASK <<
2534 				    CMD_UDMATIM_TIM_OFF(drive));
2535 				udma_reg |=
2536 				    (cmd0646_9_tim_udma[drvp->UDMA_mode] <<
2537 				    CMD_UDMATIM_TIM_OFF(drive));
2538 				pciide_pci_write(sc->sc_pc, sc->sc_tag,
2539 				    CMD_UDMATIM(chp->channel), udma_reg);
2540 			} else {
2541 				/*
2542 				 * use Multiword DMA.
2543 				 * Timings will be used for both PIO and DMA,
2544 				 * so adjust DMA mode if needed
2545 				 * if we have a 0646U2/8/9, turn off UDMA
2546 				 */
2547 				if (sc->sc_wdcdev.cap & WDC_CAPABILITY_UDMA) {
2548 					udma_reg = pciide_pci_read(sc->sc_pc,
2549 					    sc->sc_tag,
2550 					    CMD_UDMATIM(chp->channel));
2551 					udma_reg &= ~CMD_UDMATIM_UDMA(drive);
2552 					pciide_pci_write(sc->sc_pc, sc->sc_tag,
2553 					    CMD_UDMATIM(chp->channel),
2554 					    udma_reg);
2555 				}
2556 				if (drvp->PIO_mode >= 3 &&
2557 				    (drvp->DMA_mode + 2) > drvp->PIO_mode) {
2558 					drvp->DMA_mode = drvp->PIO_mode - 2;
2559 				}
2560 				tim = cmd0643_9_data_tim_dma[drvp->DMA_mode];
2561 			}
2562 			idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
2563 		}
2564 		pciide_pci_write(sc->sc_pc, sc->sc_tag,
2565 		    CMD_DATA_TIM(chp->channel, drive), tim);
2566 	}
2567 	if (idedma_ctl != 0) {
2568 		/* Add software bits in status register */
2569 		bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
2570 		    IDEDMA_CTL + (IDEDMA_SCH_OFFSET * chp->channel),
2571 		    idedma_ctl);
2572 	}
2573 	pciide_print_modes(cp);
2574 }
2575 
2576 void
2577 cmd646_9_irqack(chp)
2578 	struct channel_softc *chp;
2579 {
2580 	u_int32_t priirq, secirq;
2581 	struct pciide_channel *cp = (struct pciide_channel*)chp;
2582 	struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
2583 
2584 	if (chp->channel == 0) {
2585 		priirq = pciide_pci_read(sc->sc_pc, sc->sc_tag, CMD_CONF);
2586 		pciide_pci_write(sc->sc_pc, sc->sc_tag, CMD_CONF, priirq);
2587 	} else {
2588 		secirq = pciide_pci_read(sc->sc_pc, sc->sc_tag, CMD_ARTTIM23);
2589 		pciide_pci_write(sc->sc_pc, sc->sc_tag, CMD_ARTTIM23, secirq);
2590 	}
2591 	pciide_irqack(chp);
2592 }
2593 
2594 void
2595 cy693_chip_map(sc, pa)
2596 	struct pciide_softc *sc;
2597 	struct pci_attach_args *pa;
2598 {
2599 	struct pciide_channel *cp;
2600 	pcireg_t interface = PCI_INTERFACE(pa->pa_class);
2601 	bus_size_t cmdsize, ctlsize;
2602 
2603 	if (pciide_chipen(sc, pa) == 0)
2604 		return;
2605 	/*
2606 	 * this chip has 2 PCI IDE functions, one for primary and one for
2607 	 * secondary. So we need to call pciide_mapregs_compat() with
2608 	 * the real channel
2609 	 */
2610 	if (pa->pa_function == 1) {
2611 		sc->sc_cy_compatchan = 0;
2612 	} else if (pa->pa_function == 2) {
2613 		sc->sc_cy_compatchan = 1;
2614 	} else {
2615 		printf("%s: unexpected PCI function %d\n",
2616 		    sc->sc_wdcdev.sc_dev.dv_xname, pa->pa_function);
2617 		return;
2618 	}
2619 	if (interface & PCIIDE_INTERFACE_BUS_MASTER_DMA) {
2620 		printf("%s: bus-master DMA support present",
2621 		    sc->sc_wdcdev.sc_dev.dv_xname);
2622 		pciide_mapreg_dma(sc, pa);
2623 	} else {
2624 		printf("%s: hardware does not support DMA",
2625 		    sc->sc_wdcdev.sc_dev.dv_xname);
2626 		sc->sc_dma_ok = 0;
2627 	}
2628 	printf("\n");
2629 
2630 	sc->sc_cy_handle = cy82c693_init(pa->pa_iot);
2631 	if (sc->sc_cy_handle == NULL) {
2632 		printf("%s: unable to map hyperCache control registers\n",
2633 		    sc->sc_wdcdev.sc_dev.dv_xname);
2634 		sc->sc_dma_ok = 0;
2635 	}
2636 
2637 	sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
2638 	    WDC_CAPABILITY_MODE;
2639 	if (sc->sc_dma_ok) {
2640 		sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_IRQACK;
2641 		sc->sc_wdcdev.irqack = pciide_irqack;
2642 	}
2643 	sc->sc_wdcdev.PIO_cap = 4;
2644 	sc->sc_wdcdev.DMA_cap = 2;
2645 	sc->sc_wdcdev.set_modes = cy693_setup_channel;
2646 
2647 	sc->sc_wdcdev.channels = sc->wdc_chanarray;
2648 	sc->sc_wdcdev.nchannels = 1;
2649 
2650 	/* Only one channel for this chip; if we are here it's enabled */
2651 	cp = &sc->pciide_channels[0];
2652 	sc->wdc_chanarray[0] = &cp->wdc_channel;
2653 	cp->name = PCIIDE_CHANNEL_NAME(0);
2654 	cp->wdc_channel.channel = 0;
2655 	cp->wdc_channel.wdc = &sc->sc_wdcdev;
2656 	cp->wdc_channel.ch_queue =
2657 	    malloc(sizeof(struct channel_queue), M_DEVBUF, M_NOWAIT);
2658 	if (cp->wdc_channel.ch_queue == NULL) {
2659 		printf("%s primary channel: "
2660 		    "can't allocate memory for command queue",
2661 		sc->sc_wdcdev.sc_dev.dv_xname);
2662 		return;
2663 	}
2664 	printf("%s: primary channel %s to ",
2665 	    sc->sc_wdcdev.sc_dev.dv_xname,
2666 	    (interface & PCIIDE_INTERFACE_SETTABLE(0)) ?
2667 	    "configured" : "wired");
2668 	if (interface & PCIIDE_INTERFACE_PCI(0)) {
2669 		printf("native-PCI");
2670 		cp->hw_ok = pciide_mapregs_native(pa, cp, &cmdsize, &ctlsize,
2671 		    pciide_pci_intr);
2672 	} else {
2673 		printf("compatibility");
2674 		cp->hw_ok = pciide_mapregs_compat(pa, cp, sc->sc_cy_compatchan,
2675 		    &cmdsize, &ctlsize);
2676 	}
2677 	printf(" mode\n");
2678 	cp->wdc_channel.data32iot = cp->wdc_channel.cmd_iot;
2679 	cp->wdc_channel.data32ioh = cp->wdc_channel.cmd_ioh;
2680 	wdcattach(&cp->wdc_channel);
2681 	if (pciide_chan_candisable(cp)) {
2682 		pci_conf_write(sc->sc_pc, sc->sc_tag,
2683 		    PCI_COMMAND_STATUS_REG, 0);
2684 	}
2685 	pciide_map_compat_intr(pa, cp, sc->sc_cy_compatchan, interface);
2686 	if (cp->hw_ok == 0)
2687 		return;
2688 	WDCDEBUG_PRINT(("cy693_chip_map: old timings reg 0x%x\n",
2689 	    pci_conf_read(sc->sc_pc, sc->sc_tag, CY_CMD_CTRL)),DEBUG_PROBE);
2690 	cy693_setup_channel(&cp->wdc_channel);
2691 	WDCDEBUG_PRINT(("cy693_chip_map: new timings reg 0x%x\n",
2692 	    pci_conf_read(sc->sc_pc, sc->sc_tag, CY_CMD_CTRL)), DEBUG_PROBE);
2693 }
2694 
2695 void
2696 cy693_setup_channel(chp)
2697 	struct channel_softc *chp;
2698 {
2699 	struct ata_drive_datas *drvp;
2700 	int drive;
2701 	u_int32_t cy_cmd_ctrl;
2702 	u_int32_t idedma_ctl;
2703 	struct pciide_channel *cp = (struct pciide_channel*)chp;
2704 	struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
2705 	int dma_mode = -1;
2706 
2707 	cy_cmd_ctrl = idedma_ctl = 0;
2708 
2709 	/* setup DMA if needed */
2710 	pciide_channel_dma_setup(cp);
2711 
2712 	for (drive = 0; drive < 2; drive++) {
2713 		drvp = &chp->ch_drive[drive];
2714 		/* If no drive, skip */
2715 		if ((drvp->drive_flags & DRIVE) == 0)
2716 			continue;
2717 		/* add timing values, setup DMA if needed */
2718 		if (drvp->drive_flags & DRIVE_DMA) {
2719 			idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
2720 			/* use Multiword DMA */
2721 			if (dma_mode == -1 || dma_mode > drvp->DMA_mode)
2722 				dma_mode = drvp->DMA_mode;
2723 		}
2724 		cy_cmd_ctrl |= (cy_pio_pulse[drvp->PIO_mode] <<
2725 		    CY_CMD_CTRL_IOW_PULSE_OFF(drive));
2726 		cy_cmd_ctrl |= (cy_pio_rec[drvp->PIO_mode] <<
2727 		    CY_CMD_CTRL_IOW_REC_OFF(drive));
2728 		cy_cmd_ctrl |= (cy_pio_pulse[drvp->PIO_mode] <<
2729 		    CY_CMD_CTRL_IOR_PULSE_OFF(drive));
2730 		cy_cmd_ctrl |= (cy_pio_rec[drvp->PIO_mode] <<
2731 		    CY_CMD_CTRL_IOR_REC_OFF(drive));
2732 	}
2733 	pci_conf_write(sc->sc_pc, sc->sc_tag, CY_CMD_CTRL, cy_cmd_ctrl);
2734 	chp->ch_drive[0].DMA_mode = dma_mode;
2735 	chp->ch_drive[1].DMA_mode = dma_mode;
2736 
2737 	if (dma_mode == -1)
2738 		dma_mode = 0;
2739 
2740 	if (sc->sc_cy_handle != NULL) {
2741 		/* Note: `multiple' is implied. */
2742 		cy82c693_write(sc->sc_cy_handle,
2743 		    (sc->sc_cy_compatchan == 0) ?
2744 		    CY_DMA_IDX_PRIMARY : CY_DMA_IDX_SECONDARY, dma_mode);
2745 	}
2746 
2747 	pciide_print_modes(cp);
2748 
2749 	if (idedma_ctl != 0) {
2750 		/* Add software bits in status register */
2751 		bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
2752 		    IDEDMA_CTL, idedma_ctl);
2753 	}
2754 }
2755 
2756 static int
2757 sis_hostbr_match(pa)
2758 	struct pci_attach_args *pa;
2759 {
2760 	return ((PCI_VENDOR(pa->pa_id) == PCI_VENDOR_SIS) &&
2761 	   ((PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_SIS_645) ||
2762 	    (PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_SIS_650) ||
2763 	    (PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_SIS_730) ||
2764 	    (PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_SIS_735)));
2765 }
2766 
2767 void
2768 sis_chip_map(sc, pa)
2769 	struct pciide_softc *sc;
2770 	struct pci_attach_args *pa;
2771 {
2772 	struct pciide_channel *cp;
2773 	int channel;
2774 	u_int8_t sis_ctr0 = pciide_pci_read(sc->sc_pc, sc->sc_tag, SIS_CTRL0);
2775 	pcireg_t interface = PCI_INTERFACE(pa->pa_class);
2776 	pcireg_t rev = PCI_REVISION(pa->pa_class);
2777 	bus_size_t cmdsize, ctlsize;
2778 	pcitag_t pchb_tag;
2779 	pcireg_t pchb_id, pchb_class;
2780 
2781 	if (pciide_chipen(sc, pa) == 0)
2782 		return;
2783 	printf("%s: bus-master DMA support present",
2784 	    sc->sc_wdcdev.sc_dev.dv_xname);
2785 	pciide_mapreg_dma(sc, pa);
2786 	printf("\n");
2787 
2788 	/* get a PCI tag for the host bridge (function 0 of the same device) */
2789 	pchb_tag = pci_make_tag(pa->pa_pc, pa->pa_bus, pa->pa_device, 0);
2790 	/* and read ID and rev of the ISA bridge */
2791 	pchb_id = pci_conf_read(sc->sc_pc, pchb_tag, PCI_ID_REG);
2792 	pchb_class = pci_conf_read(sc->sc_pc, pchb_tag, PCI_CLASS_REG);
2793 
2794 	sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
2795 	    WDC_CAPABILITY_MODE;
2796 	if (sc->sc_dma_ok) {
2797 		sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_IRQACK;
2798 		sc->sc_wdcdev.irqack = pciide_irqack;
2799 		/*
2800 		 * controllers associated to a rev 0x2 530 Host to PCI Bridge
2801 		 * have problems with UDMA (info provided by Christos)
2802 		 */
2803 		if (rev >= 0xd0 &&
2804 		    (PCI_PRODUCT(pchb_id) != PCI_PRODUCT_SIS_530HB ||
2805 		    PCI_REVISION(pchb_class) >= 0x03))
2806 			sc->sc_wdcdev.cap |= WDC_CAPABILITY_UDMA;
2807 	}
2808 
2809 	sc->sc_wdcdev.PIO_cap = 4;
2810 	sc->sc_wdcdev.DMA_cap = 2;
2811 	if (sc->sc_wdcdev.cap & WDC_CAPABILITY_UDMA)
2812 		/*
2813 		 * Use UDMA/100 on SiS 735 chipset and UDMA/33 on other
2814 		 * chipsets.
2815 		 */
2816 		sc->sc_wdcdev.UDMA_cap =
2817 		    pci_find_device(pa, sis_hostbr_match) ? 5 : 2;
2818 	sc->sc_wdcdev.set_modes = sis_setup_channel;
2819 
2820 	sc->sc_wdcdev.channels = sc->wdc_chanarray;
2821 	sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
2822 
2823 	pciide_pci_write(sc->sc_pc, sc->sc_tag, SIS_MISC,
2824 	    pciide_pci_read(sc->sc_pc, sc->sc_tag, SIS_MISC) |
2825 	    SIS_MISC_TIM_SEL | SIS_MISC_FIFO_SIZE);
2826 
2827 	for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
2828 		cp = &sc->pciide_channels[channel];
2829 		if (pciide_chansetup(sc, channel, interface) == 0)
2830 			continue;
2831 		if ((channel == 0 && (sis_ctr0 & SIS_CTRL0_CHAN0_EN) == 0) ||
2832 		    (channel == 1 && (sis_ctr0 & SIS_CTRL0_CHAN1_EN) == 0)) {
2833 			printf("%s: %s channel ignored (disabled)\n",
2834 			    sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
2835 			continue;
2836 		}
2837 		pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize,
2838 		    pciide_pci_intr);
2839 		if (cp->hw_ok == 0)
2840 			continue;
2841 		if (pciide_chan_candisable(cp)) {
2842 			if (channel == 0)
2843 				sis_ctr0 &= ~SIS_CTRL0_CHAN0_EN;
2844 			else
2845 				sis_ctr0 &= ~SIS_CTRL0_CHAN1_EN;
2846 			pciide_pci_write(sc->sc_pc, sc->sc_tag, SIS_CTRL0,
2847 			    sis_ctr0);
2848 		}
2849 		pciide_map_compat_intr(pa, cp, channel, interface);
2850 		if (cp->hw_ok == 0)
2851 			continue;
2852 		sis_setup_channel(&cp->wdc_channel);
2853 	}
2854 }
2855 
2856 void
2857 sis_setup_channel(chp)
2858 	struct channel_softc *chp;
2859 {
2860 	struct ata_drive_datas *drvp;
2861 	int drive;
2862 	u_int32_t sis_tim;
2863 	u_int32_t idedma_ctl;
2864 	struct pciide_channel *cp = (struct pciide_channel*)chp;
2865 	struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
2866 
2867 	WDCDEBUG_PRINT(("sis_setup_channel: old timings reg for "
2868 	    "channel %d 0x%x\n", chp->channel,
2869 	    pci_conf_read(sc->sc_pc, sc->sc_tag, SIS_TIM(chp->channel))),
2870 	    DEBUG_PROBE);
2871 	sis_tim = 0;
2872 	idedma_ctl = 0;
2873 	/* setup DMA if needed */
2874 	pciide_channel_dma_setup(cp);
2875 
2876 	for (drive = 0; drive < 2; drive++) {
2877 		drvp = &chp->ch_drive[drive];
2878 		/* If no drive, skip */
2879 		if ((drvp->drive_flags & DRIVE) == 0)
2880 			continue;
2881 		/* add timing values, setup DMA if needed */
2882 		if ((drvp->drive_flags & DRIVE_DMA) == 0 &&
2883 		    (drvp->drive_flags & DRIVE_UDMA) == 0)
2884 			goto pio;
2885 
2886 		if (drvp->drive_flags & DRIVE_UDMA) {
2887 			/* use Ultra/DMA */
2888 			drvp->drive_flags &= ~DRIVE_DMA;
2889 			sis_tim |= sis_udma_tim[drvp->UDMA_mode] <<
2890 			    SIS_TIM_UDMA_TIME_OFF(drive);
2891 			sis_tim |= SIS_TIM_UDMA_EN(drive);
2892 		} else {
2893 			/*
2894 			 * use Multiword DMA
2895 			 * Timings will be used for both PIO and DMA,
2896 			 * so adjust DMA mode if needed
2897 			 */
2898 			if (drvp->PIO_mode > (drvp->DMA_mode + 2))
2899 				drvp->PIO_mode = drvp->DMA_mode + 2;
2900 			if (drvp->DMA_mode + 2 > (drvp->PIO_mode))
2901 				drvp->DMA_mode = (drvp->PIO_mode > 2) ?
2902 				    drvp->PIO_mode - 2 : 0;
2903 			if (drvp->DMA_mode == 0)
2904 				drvp->PIO_mode = 0;
2905 		}
2906 		idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
2907 pio:		sis_tim |= sis_pio_act[drvp->PIO_mode] <<
2908 		    SIS_TIM_ACT_OFF(drive);
2909 		sis_tim |= sis_pio_rec[drvp->PIO_mode] <<
2910 		    SIS_TIM_REC_OFF(drive);
2911 	}
2912 	WDCDEBUG_PRINT(("sis_setup_channel: new timings reg for "
2913 	    "channel %d 0x%x\n", chp->channel, sis_tim), DEBUG_PROBE);
2914 	pci_conf_write(sc->sc_pc, sc->sc_tag, SIS_TIM(chp->channel), sis_tim);
2915 	if (idedma_ctl != 0) {
2916 		/* Add software bits in status register */
2917 		bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
2918 		    IDEDMA_CTL, idedma_ctl);
2919 	}
2920 	pciide_print_modes(cp);
2921 }
2922 
2923 static int
2924 acer_isabr_match(pa)
2925 	struct pci_attach_args *pa;
2926 {
2927 	return ((PCI_VENDOR(pa->pa_id) == PCI_VENDOR_ALI) &&
2928 	   (PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_ALI_M1543));
2929 }
2930 
2931 void
2932 acer_chip_map(sc, pa)
2933 	struct pciide_softc *sc;
2934 	struct pci_attach_args *pa;
2935 {
2936 	struct pci_attach_args isa_pa;
2937 	struct pciide_channel *cp;
2938 	int channel;
2939 	pcireg_t cr, interface;
2940 	bus_size_t cmdsize, ctlsize;
2941 	pcireg_t rev = PCI_REVISION(pa->pa_class);
2942 
2943 	if (pciide_chipen(sc, pa) == 0)
2944 		return;
2945 	printf("%s: bus-master DMA support present",
2946 	    sc->sc_wdcdev.sc_dev.dv_xname);
2947 	pciide_mapreg_dma(sc, pa);
2948 	printf("\n");
2949 	sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
2950 	    WDC_CAPABILITY_MODE;
2951 	if (sc->sc_dma_ok) {
2952 		sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA;
2953 		if (rev >= 0x20) {
2954 			sc->sc_wdcdev.cap |= WDC_CAPABILITY_UDMA;
2955 			if (rev >= 0xC4)
2956 				sc->sc_wdcdev.UDMA_cap = 5;
2957 			else if (rev >= 0xC2)
2958 				sc->sc_wdcdev.UDMA_cap = 4;
2959 			else
2960 				sc->sc_wdcdev.UDMA_cap = 2;
2961 		}
2962 		sc->sc_wdcdev.cap |= WDC_CAPABILITY_IRQACK;
2963 		sc->sc_wdcdev.irqack = pciide_irqack;
2964 	}
2965 
2966 	sc->sc_wdcdev.PIO_cap = 4;
2967 	sc->sc_wdcdev.DMA_cap = 2;
2968 	sc->sc_wdcdev.set_modes = acer_setup_channel;
2969 	sc->sc_wdcdev.channels = sc->wdc_chanarray;
2970 	sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
2971 
2972 	pciide_pci_write(sc->sc_pc, sc->sc_tag, ACER_CDRC,
2973 	    (pciide_pci_read(sc->sc_pc, sc->sc_tag, ACER_CDRC) |
2974 		ACER_CDRC_DMA_EN) & ~ACER_CDRC_FIFO_DISABLE);
2975 
2976 	/* Enable "microsoft register bits" R/W. */
2977 	pciide_pci_write(sc->sc_pc, sc->sc_tag, ACER_CCAR3,
2978 	    pciide_pci_read(sc->sc_pc, sc->sc_tag, ACER_CCAR3) | ACER_CCAR3_PI);
2979 	pciide_pci_write(sc->sc_pc, sc->sc_tag, ACER_CCAR1,
2980 	    pciide_pci_read(sc->sc_pc, sc->sc_tag, ACER_CCAR1) &
2981 	    ~(ACER_CHANSTATUS_RO|PCIIDE_CHAN_RO(0)|PCIIDE_CHAN_RO(1)));
2982 	pciide_pci_write(sc->sc_pc, sc->sc_tag, ACER_CCAR2,
2983 	    pciide_pci_read(sc->sc_pc, sc->sc_tag, ACER_CCAR2) &
2984 	    ~ACER_CHANSTATUSREGS_RO);
2985 	cr = pci_conf_read(sc->sc_pc, sc->sc_tag, PCI_CLASS_REG);
2986 	cr |= (PCIIDE_CHANSTATUS_EN << PCI_INTERFACE_SHIFT);
2987 	pci_conf_write(sc->sc_pc, sc->sc_tag, PCI_CLASS_REG, cr);
2988 	/* Don't use cr, re-read the real register content instead */
2989 	interface = PCI_INTERFACE(pci_conf_read(sc->sc_pc, sc->sc_tag,
2990 	    PCI_CLASS_REG));
2991 
2992 	/* From linux: enable "Cable Detection" */
2993 	if (rev >= 0xC2) {
2994 		pciide_pci_write(sc->sc_pc, sc->sc_tag, ACER_0x4B,
2995 		    pciide_pci_read(sc->sc_pc, sc->sc_tag, ACER_0x4B)
2996 		    | ACER_0x4B_CDETECT);
2997 		/* set south-bridge's enable bit, m1533, 0x79 */
2998 		if (pci_find_device(&isa_pa, acer_isabr_match) == 0) {
2999 			printf("%s: can't find PCI/ISA bridge, downgrading "
3000 			    "to Ultra/33\n", sc->sc_wdcdev.sc_dev.dv_xname);
3001 			sc->sc_wdcdev.UDMA_cap = 2;
3002 		} else {
3003 			if (rev == 0xC2)
3004 				/* 1543C-B0 (m1533, 0x79, bit 2) */
3005 				pciide_pci_write(isa_pa.pa_pc, isa_pa.pa_tag,
3006 				    ACER_0x79,
3007 				    pciide_pci_read(isa_pa.pa_pc, isa_pa.pa_tag,
3008 					ACER_0x79)
3009 				    | ACER_0x79_REVC2_EN);
3010 			else
3011 				/* 1553/1535 (m1533, 0x79, bit 1) */
3012 				pciide_pci_write(isa_pa.pa_pc, isa_pa.pa_tag,
3013 				    ACER_0x79,
3014 				    pciide_pci_read(isa_pa.pa_pc, isa_pa.pa_tag,
3015 					ACER_0x79)
3016 				    | ACER_0x79_EN);
3017 		}
3018 	}
3019 
3020 	for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
3021 		cp = &sc->pciide_channels[channel];
3022 		if (pciide_chansetup(sc, channel, interface) == 0)
3023 			continue;
3024 		if ((interface & PCIIDE_CHAN_EN(channel)) == 0) {
3025 			printf("%s: %s channel ignored (disabled)\n",
3026 			    sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
3027 			continue;
3028 		}
3029 		/* newer controllers seems to lack the ACER_CHIDS. Sigh */
3030 		pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize,
3031 		     (rev >= 0xC2) ? pciide_pci_intr : acer_pci_intr);
3032 		if (cp->hw_ok == 0)
3033 			continue;
3034 		if (pciide_chan_candisable(cp)) {
3035 			cr &= ~(PCIIDE_CHAN_EN(channel) << PCI_INTERFACE_SHIFT);
3036 			pci_conf_write(sc->sc_pc, sc->sc_tag,
3037 			    PCI_CLASS_REG, cr);
3038 		}
3039 		pciide_map_compat_intr(pa, cp, channel, interface);
3040 		acer_setup_channel(&cp->wdc_channel);
3041 	}
3042 }
3043 
3044 void
3045 acer_setup_channel(chp)
3046 	struct channel_softc *chp;
3047 {
3048 	struct ata_drive_datas *drvp;
3049 	int drive;
3050 	u_int32_t acer_fifo_udma;
3051 	u_int32_t idedma_ctl;
3052 	struct pciide_channel *cp = (struct pciide_channel*)chp;
3053 	struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
3054 
3055 	idedma_ctl = 0;
3056 	acer_fifo_udma = pci_conf_read(sc->sc_pc, sc->sc_tag, ACER_FTH_UDMA);
3057 	WDCDEBUG_PRINT(("acer_setup_channel: old fifo/udma reg 0x%x\n",
3058 	    acer_fifo_udma), DEBUG_PROBE);
3059 	/* setup DMA if needed */
3060 	pciide_channel_dma_setup(cp);
3061 
3062 	if ((chp->ch_drive[0].drive_flags | chp->ch_drive[1].drive_flags) &
3063 	    DRIVE_UDMA) { /* check 80 pins cable */
3064 		if (pciide_pci_read(sc->sc_pc, sc->sc_tag, ACER_0x4A) &
3065 		    ACER_0x4A_80PIN(chp->channel)) {
3066 			if (chp->ch_drive[0].UDMA_mode > 2)
3067 				chp->ch_drive[0].UDMA_mode = 2;
3068 			if (chp->ch_drive[1].UDMA_mode > 2)
3069 				chp->ch_drive[1].UDMA_mode = 2;
3070 		}
3071 	}
3072 
3073 	for (drive = 0; drive < 2; drive++) {
3074 		drvp = &chp->ch_drive[drive];
3075 		/* If no drive, skip */
3076 		if ((drvp->drive_flags & DRIVE) == 0)
3077 			continue;
3078 		WDCDEBUG_PRINT(("acer_setup_channel: old timings reg for "
3079 		    "channel %d drive %d 0x%x\n", chp->channel, drive,
3080 		    pciide_pci_read(sc->sc_pc, sc->sc_tag,
3081 		    ACER_IDETIM(chp->channel, drive))), DEBUG_PROBE);
3082 		/* clear FIFO/DMA mode */
3083 		acer_fifo_udma &= ~(ACER_FTH_OPL(chp->channel, drive, 0x3) |
3084 		    ACER_UDMA_EN(chp->channel, drive) |
3085 		    ACER_UDMA_TIM(chp->channel, drive, 0x7));
3086 
3087 		/* add timing values, setup DMA if needed */
3088 		if ((drvp->drive_flags & DRIVE_DMA) == 0 &&
3089 		    (drvp->drive_flags & DRIVE_UDMA) == 0) {
3090 			acer_fifo_udma |=
3091 			    ACER_FTH_OPL(chp->channel, drive, 0x1);
3092 			goto pio;
3093 		}
3094 
3095 		acer_fifo_udma |= ACER_FTH_OPL(chp->channel, drive, 0x2);
3096 		if (drvp->drive_flags & DRIVE_UDMA) {
3097 			/* use Ultra/DMA */
3098 			drvp->drive_flags &= ~DRIVE_DMA;
3099 			acer_fifo_udma |= ACER_UDMA_EN(chp->channel, drive);
3100 			acer_fifo_udma |=
3101 			    ACER_UDMA_TIM(chp->channel, drive,
3102 				acer_udma[drvp->UDMA_mode]);
3103 			/* XXX disable if one drive < UDMA3 ? */
3104 			if (drvp->UDMA_mode >= 3) {
3105 				pciide_pci_write(sc->sc_pc, sc->sc_tag,
3106 				    ACER_0x4B,
3107 				    pciide_pci_read(sc->sc_pc, sc->sc_tag,
3108 					ACER_0x4B) | ACER_0x4B_UDMA66);
3109 			}
3110 		} else {
3111 			/*
3112 			 * use Multiword DMA
3113 			 * Timings will be used for both PIO and DMA,
3114 			 * so adjust DMA mode if needed
3115 			 */
3116 			if (drvp->PIO_mode > (drvp->DMA_mode + 2))
3117 				drvp->PIO_mode = drvp->DMA_mode + 2;
3118 			if (drvp->DMA_mode + 2 > (drvp->PIO_mode))
3119 				drvp->DMA_mode = (drvp->PIO_mode > 2) ?
3120 				    drvp->PIO_mode - 2 : 0;
3121 			if (drvp->DMA_mode == 0)
3122 				drvp->PIO_mode = 0;
3123 		}
3124 		idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
3125 pio:		pciide_pci_write(sc->sc_pc, sc->sc_tag,
3126 		    ACER_IDETIM(chp->channel, drive),
3127 		    acer_pio[drvp->PIO_mode]);
3128 	}
3129 	WDCDEBUG_PRINT(("acer_setup_channel: new fifo/udma reg 0x%x\n",
3130 	    acer_fifo_udma), DEBUG_PROBE);
3131 	pci_conf_write(sc->sc_pc, sc->sc_tag, ACER_FTH_UDMA, acer_fifo_udma);
3132 	if (idedma_ctl != 0) {
3133 		/* Add software bits in status register */
3134 		bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
3135 		    IDEDMA_CTL, idedma_ctl);
3136 	}
3137 	pciide_print_modes(cp);
3138 }
3139 
3140 int
3141 acer_pci_intr(arg)
3142 	void *arg;
3143 {
3144 	struct pciide_softc *sc = arg;
3145 	struct pciide_channel *cp;
3146 	struct channel_softc *wdc_cp;
3147 	int i, rv, crv;
3148 	u_int32_t chids;
3149 
3150 	rv = 0;
3151 	chids = pciide_pci_read(sc->sc_pc, sc->sc_tag, ACER_CHIDS);
3152 	for (i = 0; i < sc->sc_wdcdev.nchannels; i++) {
3153 		cp = &sc->pciide_channels[i];
3154 		wdc_cp = &cp->wdc_channel;
3155 		/* If a compat channel skip. */
3156 		if (cp->compat)
3157 			continue;
3158 		if (chids & ACER_CHIDS_INT(i)) {
3159 			crv = wdcintr(wdc_cp);
3160 			if (crv == 0)
3161 				printf("%s:%d: bogus intr\n",
3162 				    sc->sc_wdcdev.sc_dev.dv_xname, i);
3163 			else
3164 				rv = 1;
3165 		}
3166 	}
3167 	return rv;
3168 }
3169 
3170 void
3171 hpt_chip_map(sc, pa)
3172 	struct pciide_softc *sc;
3173 	struct pci_attach_args *pa;
3174 {
3175 	struct pciide_channel *cp;
3176 	int i, compatchan, revision;
3177 	pcireg_t interface;
3178 	bus_size_t cmdsize, ctlsize;
3179 
3180 	if (pciide_chipen(sc, pa) == 0)
3181 		return;
3182 	revision = PCI_REVISION(pa->pa_class);
3183 	printf(": Triones/Highpoint ");
3184 	if (revision == HPT370_REV)
3185 		printf("HPT370 IDE Controller\n");
3186 	else if (revision == HPT370A_REV)
3187 		printf("HPT370A IDE Controller\n");
3188 	else if (revision == HPT366_REV)
3189 		printf("HPT366 IDE Controller\n");
3190 	else
3191 		printf("unknown HPT IDE controller rev %d\n", revision);
3192 
3193 	/*
3194 	 * when the chip is in native mode it identifies itself as a
3195 	 * 'misc mass storage'. Fake interface in this case.
3196 	 */
3197 	if (PCI_SUBCLASS(pa->pa_class) == PCI_SUBCLASS_MASS_STORAGE_IDE) {
3198 		interface = PCI_INTERFACE(pa->pa_class);
3199 	} else {
3200 		interface = PCIIDE_INTERFACE_BUS_MASTER_DMA |
3201 		    PCIIDE_INTERFACE_PCI(0);
3202 		if (revision == HPT370_REV || revision == HPT370A_REV)
3203 			interface |= PCIIDE_INTERFACE_PCI(1);
3204 	}
3205 
3206 	printf("%s: bus-master DMA support present",
3207 		sc->sc_wdcdev.sc_dev.dv_xname);
3208 	pciide_mapreg_dma(sc, pa);
3209 	printf("\n");
3210 	sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
3211 	    WDC_CAPABILITY_MODE;
3212 	if (sc->sc_dma_ok) {
3213 		sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_UDMA;
3214 		sc->sc_wdcdev.cap |= WDC_CAPABILITY_IRQACK;
3215 		sc->sc_wdcdev.irqack = pciide_irqack;
3216 	}
3217 	sc->sc_wdcdev.PIO_cap = 4;
3218 	sc->sc_wdcdev.DMA_cap = 2;
3219 
3220 	sc->sc_wdcdev.set_modes = hpt_setup_channel;
3221 	sc->sc_wdcdev.channels = sc->wdc_chanarray;
3222 	if (revision == HPT366_REV) {
3223 		sc->sc_wdcdev.UDMA_cap = 4;
3224 		/*
3225 		 * The 366 has 2 PCI IDE functions, one for primary and one
3226 		 * for secondary. So we need to call pciide_mapregs_compat()
3227 		 * with the real channel
3228 		 */
3229 		if (pa->pa_function == 0) {
3230 			compatchan = 0;
3231 		} else if (pa->pa_function == 1) {
3232 			compatchan = 1;
3233 		} else {
3234 			printf("%s: unexpected PCI function %d\n",
3235 			    sc->sc_wdcdev.sc_dev.dv_xname, pa->pa_function);
3236 			return;
3237 		}
3238 		sc->sc_wdcdev.nchannels = 1;
3239 	} else {
3240 		sc->sc_wdcdev.nchannels = 2;
3241 		sc->sc_wdcdev.UDMA_cap = 5;
3242 	}
3243 	for (i = 0; i < sc->sc_wdcdev.nchannels; i++) {
3244 		cp = &sc->pciide_channels[i];
3245 		if (sc->sc_wdcdev.nchannels > 1) {
3246 			compatchan = i;
3247 			if((pciide_pci_read(sc->sc_pc, sc->sc_tag,
3248 			   HPT370_CTRL1(i)) & HPT370_CTRL1_EN) == 0) {
3249 				printf("%s: %s channel ignored (disabled)\n",
3250 				    sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
3251 				continue;
3252 			}
3253 		}
3254 		if (pciide_chansetup(sc, i, interface) == 0)
3255 			continue;
3256 		if (interface & PCIIDE_INTERFACE_PCI(i)) {
3257 			cp->hw_ok = pciide_mapregs_native(pa, cp, &cmdsize,
3258 			    &ctlsize, hpt_pci_intr);
3259 		} else {
3260 			cp->hw_ok = pciide_mapregs_compat(pa, cp, compatchan,
3261 			    &cmdsize, &ctlsize);
3262 		}
3263 		if (cp->hw_ok == 0)
3264 			return;
3265 		cp->wdc_channel.data32iot = cp->wdc_channel.cmd_iot;
3266 		cp->wdc_channel.data32ioh = cp->wdc_channel.cmd_ioh;
3267 		wdcattach(&cp->wdc_channel);
3268 		hpt_setup_channel(&cp->wdc_channel);
3269 	}
3270 	if (revision == HPT370_REV || revision == HPT370A_REV) {
3271 		/*
3272 		 * HPT370_REV has a bit to disable interrupts, make sure
3273 		 * to clear it
3274 		 */
3275 		pciide_pci_write(sc->sc_pc, sc->sc_tag, HPT_CSEL,
3276 		    pciide_pci_read(sc->sc_pc, sc->sc_tag, HPT_CSEL) &
3277 		    ~HPT_CSEL_IRQDIS);
3278 	}
3279 	return;
3280 }
3281 
3282 void
3283 hpt_setup_channel(chp)
3284 	struct channel_softc *chp;
3285 {
3286 	struct ata_drive_datas *drvp;
3287 	int drive;
3288 	int cable;
3289 	u_int32_t before, after;
3290 	u_int32_t idedma_ctl;
3291 	struct pciide_channel *cp = (struct pciide_channel*)chp;
3292 	struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
3293 
3294 	cable = pciide_pci_read(sc->sc_pc, sc->sc_tag, HPT_CSEL);
3295 
3296 	/* setup DMA if needed */
3297 	pciide_channel_dma_setup(cp);
3298 
3299 	idedma_ctl = 0;
3300 
3301 	/* Per drive settings */
3302 	for (drive = 0; drive < 2; drive++) {
3303 		drvp = &chp->ch_drive[drive];
3304 		/* If no drive, skip */
3305 		if ((drvp->drive_flags & DRIVE) == 0)
3306 			continue;
3307 		before = pci_conf_read(sc->sc_pc, sc->sc_tag,
3308 					HPT_IDETIM(chp->channel, drive));
3309 
3310 		/* add timing values, setup DMA if needed */
3311 		if (drvp->drive_flags & DRIVE_UDMA) {
3312 			/* use Ultra/DMA */
3313 			drvp->drive_flags &= ~DRIVE_DMA;
3314 			if ((cable & HPT_CSEL_CBLID(chp->channel)) != 0 &&
3315 			    drvp->UDMA_mode > 2)
3316 				drvp->UDMA_mode = 2;
3317 			after = (sc->sc_wdcdev.nchannels == 2) ?
3318 			    hpt370_udma[drvp->UDMA_mode] :
3319 			    hpt366_udma[drvp->UDMA_mode];
3320 			idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
3321 		} else if (drvp->drive_flags & DRIVE_DMA) {
3322 			/*
3323 			 * use Multiword DMA.
3324 			 * Timings will be used for both PIO and DMA, so adjust
3325 			 * DMA mode if needed
3326 			 */
3327 			if (drvp->PIO_mode >= 3 &&
3328 			    (drvp->DMA_mode + 2) > drvp->PIO_mode) {
3329 				drvp->DMA_mode = drvp->PIO_mode - 2;
3330 			}
3331 			after = (sc->sc_wdcdev.nchannels == 2) ?
3332 			    hpt370_dma[drvp->DMA_mode] :
3333 			    hpt366_dma[drvp->DMA_mode];
3334 			idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
3335 		} else {
3336 			/* PIO only */
3337 			after = (sc->sc_wdcdev.nchannels == 2) ?
3338 			    hpt370_pio[drvp->PIO_mode] :
3339 			    hpt366_pio[drvp->PIO_mode];
3340 		}
3341 		pci_conf_write(sc->sc_pc, sc->sc_tag,
3342 		    HPT_IDETIM(chp->channel, drive), after);
3343 		WDCDEBUG_PRINT(("%s: bus speed register set to 0x%08x "
3344 		    "(BIOS 0x%08x)\n", drvp->drv_softc->dv_xname,
3345 		    after, before), DEBUG_PROBE);
3346 	}
3347 	if (idedma_ctl != 0) {
3348 		/* Add software bits in status register */
3349 		bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
3350 		    IDEDMA_CTL, idedma_ctl);
3351 	}
3352 	pciide_print_modes(cp);
3353 }
3354 
3355 int
3356 hpt_pci_intr(arg)
3357 	void *arg;
3358 {
3359 	struct pciide_softc *sc = arg;
3360 	struct pciide_channel *cp;
3361 	struct channel_softc *wdc_cp;
3362 	int rv = 0;
3363 	int dmastat, i, crv;
3364 
3365 	for (i = 0; i < sc->sc_wdcdev.nchannels; i++) {
3366 		dmastat = bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh,
3367 		    IDEDMA_CTL + IDEDMA_SCH_OFFSET * i);
3368 		if((dmastat & IDEDMA_CTL_INTR) == 0)
3369 			continue;
3370 		cp = &sc->pciide_channels[i];
3371 		wdc_cp = &cp->wdc_channel;
3372 		crv = wdcintr(wdc_cp);
3373 		if (crv == 0) {
3374 			printf("%s:%d: bogus intr\n",
3375 			    sc->sc_wdcdev.sc_dev.dv_xname, i);
3376 			bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
3377 			    IDEDMA_CTL + IDEDMA_SCH_OFFSET * i, dmastat);
3378 		} else
3379 			rv = 1;
3380 	}
3381 	return rv;
3382 }
3383 
3384 
3385 /* Macros to test product */
3386 #define PDC_IS_262(sc)							\
3387 	((sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA66 ||	\
3388 	(sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA100 ||	\
3389 	(sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA100X)
3390 #define PDC_IS_265(sc)							\
3391 	((sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA100 ||	\
3392 	(sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_ULTRA100X)
3393 
3394 void
3395 pdc202xx_chip_map(sc, pa)
3396 	struct pciide_softc *sc;
3397 	struct pci_attach_args *pa;
3398 {
3399 	struct pciide_channel *cp;
3400 	int channel;
3401 	pcireg_t interface, st, mode;
3402 	bus_size_t cmdsize, ctlsize;
3403 
3404 	st = pci_conf_read(sc->sc_pc, sc->sc_tag, PDC2xx_STATE);
3405 	WDCDEBUG_PRINT(("pdc202xx_setup_chip: controller state 0x%x\n", st),
3406 	    DEBUG_PROBE);
3407 	if (pciide_chipen(sc, pa) == 0)
3408 		return;
3409 
3410 	/* turn off  RAID mode */
3411 	st &= ~PDC2xx_STATE_IDERAID;
3412 
3413 	/*
3414 	 * can't rely on the PCI_CLASS_REG content if the chip was in raid
3415 	 * mode. We have to fake interface
3416 	 */
3417 	interface = PCIIDE_INTERFACE_SETTABLE(0) | PCIIDE_INTERFACE_SETTABLE(1);
3418 	if (st & PDC2xx_STATE_NATIVE)
3419 		interface |= PCIIDE_INTERFACE_PCI(0) | PCIIDE_INTERFACE_PCI(1);
3420 
3421 	printf("%s: bus-master DMA support present",
3422 	    sc->sc_wdcdev.sc_dev.dv_xname);
3423 	pciide_mapreg_dma(sc, pa);
3424 	printf("\n");
3425 	sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
3426 	    WDC_CAPABILITY_MODE;
3427 	if (sc->sc_dma_ok) {
3428 		sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_UDMA;
3429 		sc->sc_wdcdev.cap |= WDC_CAPABILITY_IRQACK;
3430 		sc->sc_wdcdev.irqack = pciide_irqack;
3431 	}
3432 	sc->sc_wdcdev.PIO_cap = 4;
3433 	sc->sc_wdcdev.DMA_cap = 2;
3434 	if (PDC_IS_265(sc))
3435 		sc->sc_wdcdev.UDMA_cap = 5;
3436 	else if (PDC_IS_262(sc))
3437 		sc->sc_wdcdev.UDMA_cap = 4;
3438 	else
3439 		sc->sc_wdcdev.UDMA_cap = 2;
3440 	sc->sc_wdcdev.set_modes = pdc202xx_setup_channel;
3441 	sc->sc_wdcdev.channels = sc->wdc_chanarray;
3442 	sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
3443 
3444 	/* setup failsafe defaults */
3445 	mode = 0;
3446 	mode = PDC2xx_TIM_SET_PA(mode, pdc2xx_pa[0]);
3447 	mode = PDC2xx_TIM_SET_PB(mode, pdc2xx_pb[0]);
3448 	mode = PDC2xx_TIM_SET_MB(mode, pdc2xx_dma_mb[0]);
3449 	mode = PDC2xx_TIM_SET_MC(mode, pdc2xx_dma_mc[0]);
3450 	for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
3451 		WDCDEBUG_PRINT(("pdc202xx_setup_chip: channel %d drive 0 "
3452 		    "initial timings  0x%x, now 0x%x\n", channel,
3453 		    pci_conf_read(sc->sc_pc, sc->sc_tag,
3454 		    PDC2xx_TIM(channel, 0)), mode | PDC2xx_TIM_IORDYp),
3455 		    DEBUG_PROBE);
3456 		pci_conf_write(sc->sc_pc, sc->sc_tag, PDC2xx_TIM(channel, 0),
3457 		    mode | PDC2xx_TIM_IORDYp);
3458 		WDCDEBUG_PRINT(("pdc202xx_setup_chip: channel %d drive 1 "
3459 		    "initial timings  0x%x, now 0x%x\n", channel,
3460 		    pci_conf_read(sc->sc_pc, sc->sc_tag,
3461 		    PDC2xx_TIM(channel, 1)), mode), DEBUG_PROBE);
3462 		pci_conf_write(sc->sc_pc, sc->sc_tag, PDC2xx_TIM(channel, 1),
3463 		    mode);
3464 	}
3465 
3466 	mode = PDC2xx_SCR_DMA;
3467 	if (PDC_IS_262(sc)) {
3468 		mode = PDC2xx_SCR_SET_GEN(mode, PDC262_SCR_GEN_LAT);
3469 	} else {
3470 		/* the BIOS set it up this way */
3471 		mode = PDC2xx_SCR_SET_GEN(mode, 0x1);
3472 	}
3473 	mode = PDC2xx_SCR_SET_I2C(mode, 0x3); /* ditto */
3474 	mode = PDC2xx_SCR_SET_POLL(mode, 0x1); /* ditto */
3475 	WDCDEBUG_PRINT(("pdc202xx_setup_chip: initial SCR  0x%x, now 0x%x\n",
3476 	    bus_space_read_4(sc->sc_dma_iot, sc->sc_dma_ioh, PDC2xx_SCR), mode),
3477 	    DEBUG_PROBE);
3478 	bus_space_write_4(sc->sc_dma_iot, sc->sc_dma_ioh, PDC2xx_SCR, mode);
3479 
3480 	/* controller initial state register is OK even without BIOS */
3481 	/* Set DMA mode to IDE DMA compatibility */
3482 	mode = bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh, PDC2xx_PM);
3483 	WDCDEBUG_PRINT(("pdc202xx_setup_chip: primary mode 0x%x", mode ),
3484 	    DEBUG_PROBE);
3485 	bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh, PDC2xx_PM,
3486 	    mode | 0x1);
3487 	mode = bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh, PDC2xx_SM);
3488 	WDCDEBUG_PRINT((", secondary mode 0x%x\n", mode ), DEBUG_PROBE);
3489 	bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh, PDC2xx_SM,
3490 	    mode | 0x1);
3491 
3492 	for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
3493 		cp = &sc->pciide_channels[channel];
3494 		if (pciide_chansetup(sc, channel, interface) == 0)
3495 			continue;
3496 		if ((st & (PDC_IS_262(sc) ?
3497 		    PDC262_STATE_EN(channel):PDC246_STATE_EN(channel))) == 0) {
3498 			printf("%s: %s channel ignored (disabled)\n",
3499 			    sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
3500 			continue;
3501 		}
3502 		if (PDC_IS_265(sc))
3503 			pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize,
3504 			    pdc20265_pci_intr);
3505 		else
3506 			pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize,
3507 			    pdc202xx_pci_intr);
3508 		if (cp->hw_ok == 0)
3509 			continue;
3510 		if (pciide_chan_candisable(cp))
3511 			st &= ~(PDC_IS_262(sc) ?
3512 			    PDC262_STATE_EN(channel):PDC246_STATE_EN(channel));
3513 		pciide_map_compat_intr(pa, cp, channel, interface);
3514 		pdc202xx_setup_channel(&cp->wdc_channel);
3515 	}
3516 	WDCDEBUG_PRINT(("pdc202xx_setup_chip: new controller state 0x%x\n", st),
3517 	    DEBUG_PROBE);
3518 	pci_conf_write(sc->sc_pc, sc->sc_tag, PDC2xx_STATE, st);
3519 	return;
3520 }
3521 
3522 void
3523 pdc202xx_setup_channel(chp)
3524 	struct channel_softc *chp;
3525 {
3526 	struct ata_drive_datas *drvp;
3527 	int drive;
3528 	pcireg_t mode, st;
3529 	u_int32_t idedma_ctl, scr, atapi;
3530 	struct pciide_channel *cp = (struct pciide_channel*)chp;
3531 	struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
3532 	int channel = chp->channel;
3533 
3534 	/* setup DMA if needed */
3535 	pciide_channel_dma_setup(cp);
3536 
3537 	idedma_ctl = 0;
3538 	WDCDEBUG_PRINT(("pdc202xx_setup_channel %s: scr 0x%x\n",
3539 	    sc->sc_wdcdev.sc_dev.dv_xname,
3540 	    bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh, PDC262_U66)),
3541 	    DEBUG_PROBE);
3542 
3543 	/* Per channel settings */
3544 	if (PDC_IS_262(sc)) {
3545 		scr = bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh,
3546 		    PDC262_U66);
3547 		st = pci_conf_read(sc->sc_pc, sc->sc_tag, PDC2xx_STATE);
3548 		/* Trimm UDMA mode */
3549 		if ((st & PDC262_STATE_80P(channel)) != 0 ||
3550 		    (chp->ch_drive[0].drive_flags & DRIVE_UDMA &&
3551 		    chp->ch_drive[0].UDMA_mode <= 2) ||
3552 		    (chp->ch_drive[1].drive_flags & DRIVE_UDMA &&
3553 		    chp->ch_drive[1].UDMA_mode <= 2)) {
3554 			if (chp->ch_drive[0].UDMA_mode > 2)
3555 				chp->ch_drive[0].UDMA_mode = 2;
3556 			if (chp->ch_drive[1].UDMA_mode > 2)
3557 				chp->ch_drive[1].UDMA_mode = 2;
3558 		}
3559 		/* Set U66 if needed */
3560 		if ((chp->ch_drive[0].drive_flags & DRIVE_UDMA &&
3561 		    chp->ch_drive[0].UDMA_mode > 2) ||
3562 		    (chp->ch_drive[1].drive_flags & DRIVE_UDMA &&
3563 		    chp->ch_drive[1].UDMA_mode > 2))
3564 			scr |= PDC262_U66_EN(channel);
3565 		else
3566 			scr &= ~PDC262_U66_EN(channel);
3567 		bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
3568 		    PDC262_U66, scr);
3569 		WDCDEBUG_PRINT(("pdc202xx_setup_channel %s:%d: ATAPI 0x%x\n",
3570 		    sc->sc_wdcdev.sc_dev.dv_xname, channel,
3571 		    bus_space_read_4(sc->sc_dma_iot, sc->sc_dma_ioh,
3572 		    PDC262_ATAPI(channel))), DEBUG_PROBE);
3573 		if (chp->ch_drive[0].drive_flags & DRIVE_ATAPI ||
3574 			chp->ch_drive[1].drive_flags & DRIVE_ATAPI) {
3575 			if (((chp->ch_drive[0].drive_flags & DRIVE_UDMA) &&
3576 			    !(chp->ch_drive[1].drive_flags & DRIVE_UDMA) &&
3577 			    (chp->ch_drive[1].drive_flags & DRIVE_DMA)) ||
3578 			    ((chp->ch_drive[1].drive_flags & DRIVE_UDMA) &&
3579 			    !(chp->ch_drive[0].drive_flags & DRIVE_UDMA) &&
3580 			    (chp->ch_drive[0].drive_flags & DRIVE_DMA)))
3581 				atapi = 0;
3582 			else
3583 				atapi = PDC262_ATAPI_UDMA;
3584 			bus_space_write_4(sc->sc_dma_iot, sc->sc_dma_ioh,
3585 			    PDC262_ATAPI(channel), atapi);
3586 		}
3587 	}
3588 	for (drive = 0; drive < 2; drive++) {
3589 		drvp = &chp->ch_drive[drive];
3590 		/* If no drive, skip */
3591 		if ((drvp->drive_flags & DRIVE) == 0)
3592 			continue;
3593 		mode = 0;
3594 		if (drvp->drive_flags & DRIVE_UDMA) {
3595 			/* use Ultra/DMA */
3596 			drvp->drive_flags &= ~DRIVE_DMA;
3597 			mode = PDC2xx_TIM_SET_MB(mode,
3598 			    pdc2xx_udma_mb[drvp->UDMA_mode]);
3599 			mode = PDC2xx_TIM_SET_MC(mode,
3600 			    pdc2xx_udma_mc[drvp->UDMA_mode]);
3601 			drvp->drive_flags &= ~DRIVE_DMA;
3602 			idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
3603 		} else if (drvp->drive_flags & DRIVE_DMA) {
3604 			mode = PDC2xx_TIM_SET_MB(mode,
3605 			    pdc2xx_dma_mb[drvp->DMA_mode]);
3606 			mode = PDC2xx_TIM_SET_MC(mode,
3607 			    pdc2xx_dma_mc[drvp->DMA_mode]);
3608 			idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
3609 		} else {
3610 			mode = PDC2xx_TIM_SET_MB(mode,
3611 			    pdc2xx_dma_mb[0]);
3612 			mode = PDC2xx_TIM_SET_MC(mode,
3613 			    pdc2xx_dma_mc[0]);
3614 		}
3615 		mode = PDC2xx_TIM_SET_PA(mode, pdc2xx_pa[drvp->PIO_mode]);
3616 		mode = PDC2xx_TIM_SET_PB(mode, pdc2xx_pb[drvp->PIO_mode]);
3617 		if (drvp->drive_flags & DRIVE_ATA)
3618 			mode |= PDC2xx_TIM_PRE;
3619 		mode |= PDC2xx_TIM_SYNC | PDC2xx_TIM_ERRDY;
3620 		if (drvp->PIO_mode >= 3) {
3621 			mode |= PDC2xx_TIM_IORDY;
3622 			if (drive == 0)
3623 				mode |= PDC2xx_TIM_IORDYp;
3624 		}
3625 		WDCDEBUG_PRINT(("pdc202xx_setup_channel: %s:%d:%d "
3626 		    "timings 0x%x\n",
3627 		    sc->sc_wdcdev.sc_dev.dv_xname,
3628 		    chp->channel, drive, mode), DEBUG_PROBE);
3629 		pci_conf_write(sc->sc_pc, sc->sc_tag,
3630 		    PDC2xx_TIM(chp->channel, drive), mode);
3631 	}
3632 	if (idedma_ctl != 0) {
3633 		/* Add software bits in status register */
3634 		bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
3635 		    IDEDMA_CTL, idedma_ctl);
3636 	}
3637 	pciide_print_modes(cp);
3638 }
3639 
3640 int
3641 pdc202xx_pci_intr(arg)
3642 	void *arg;
3643 {
3644 	struct pciide_softc *sc = arg;
3645 	struct pciide_channel *cp;
3646 	struct channel_softc *wdc_cp;
3647 	int i, rv, crv;
3648 	u_int32_t scr;
3649 
3650 	rv = 0;
3651 	scr = bus_space_read_4(sc->sc_dma_iot, sc->sc_dma_ioh, PDC2xx_SCR);
3652 	for (i = 0; i < sc->sc_wdcdev.nchannels; i++) {
3653 		cp = &sc->pciide_channels[i];
3654 		wdc_cp = &cp->wdc_channel;
3655 		/* If a compat channel skip. */
3656 		if (cp->compat)
3657 			continue;
3658 		if (scr & PDC2xx_SCR_INT(i)) {
3659 			crv = wdcintr(wdc_cp);
3660 			if (crv == 0)
3661 				printf("%s:%d: bogus intr (reg 0x%x)\n",
3662 				    sc->sc_wdcdev.sc_dev.dv_xname, i, scr);
3663 			else
3664 				rv = 1;
3665 		}
3666 	}
3667 	return rv;
3668 }
3669 
3670 int
3671 pdc20265_pci_intr(arg)
3672 	void *arg;
3673 {
3674 	struct pciide_softc *sc = arg;
3675 	struct pciide_channel *cp;
3676 	struct channel_softc *wdc_cp;
3677 	int i, rv, crv;
3678 	u_int32_t dmastat;
3679 
3680 	rv = 0;
3681 	for (i = 0; i < sc->sc_wdcdev.nchannels; i++) {
3682 		cp = &sc->pciide_channels[i];
3683 		wdc_cp = &cp->wdc_channel;
3684 		/* If a compat channel skip. */
3685 		if (cp->compat)
3686 			continue;
3687 		/*
3688 		 * The Ultra/100 seems to assert PDC2xx_SCR_INT * spuriously,
3689 		 * however it asserts INT in IDEDMA_CTL even for non-DMA ops.
3690 		 * So use it instead (requires 2 reg reads instead of 1,
3691 		 * but we can't do it another way).
3692 		 */
3693 		dmastat = bus_space_read_1(sc->sc_dma_iot,
3694 		    sc->sc_dma_ioh, IDEDMA_CTL + IDEDMA_SCH_OFFSET * i);
3695 		if((dmastat & IDEDMA_CTL_INTR) == 0)
3696 			continue;
3697 		crv = wdcintr(wdc_cp);
3698 		if (crv == 0)
3699 			printf("%s:%d: bogus intr\n",
3700 			    sc->sc_wdcdev.sc_dev.dv_xname, i);
3701 		else
3702 			rv = 1;
3703 	}
3704 	return rv;
3705 }
3706 
3707 void
3708 opti_chip_map(sc, pa)
3709 	struct pciide_softc *sc;
3710 	struct pci_attach_args *pa;
3711 {
3712 	struct pciide_channel *cp;
3713 	bus_size_t cmdsize, ctlsize;
3714 	pcireg_t interface;
3715 	u_int8_t init_ctrl;
3716 	int channel;
3717 
3718 	if (pciide_chipen(sc, pa) == 0)
3719 		return;
3720 	printf("%s: bus-master DMA support present",
3721 	    sc->sc_wdcdev.sc_dev.dv_xname);
3722 
3723 	/*
3724 	 * XXXSCW:
3725 	 * There seem to be a couple of buggy revisions/implementations
3726 	 * of the OPTi pciide chipset. This kludge seems to fix one of
3727 	 * the reported problems (PR/11644) but still fails for the
3728 	 * other (PR/13151), although the latter may be due to other
3729 	 * issues too...
3730 	 */
3731 	if (PCI_REVISION(pa->pa_class) <= 0x12) {
3732 		printf(" but disabled due to chip rev. <= 0x12");
3733 		sc->sc_dma_ok = 0;
3734 		sc->sc_wdcdev.cap = 0;
3735 	} else {
3736 		sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA32;
3737 		pciide_mapreg_dma(sc, pa);
3738 	}
3739 	printf("\n");
3740 
3741 	sc->sc_wdcdev.cap |= WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_MODE;
3742 	sc->sc_wdcdev.PIO_cap = 4;
3743 	if (sc->sc_dma_ok) {
3744 		sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_IRQACK;
3745 		sc->sc_wdcdev.irqack = pciide_irqack;
3746 		sc->sc_wdcdev.DMA_cap = 2;
3747 	}
3748 	sc->sc_wdcdev.set_modes = opti_setup_channel;
3749 
3750 	sc->sc_wdcdev.channels = sc->wdc_chanarray;
3751 	sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
3752 
3753 	init_ctrl = pciide_pci_read(sc->sc_pc, sc->sc_tag,
3754 	    OPTI_REG_INIT_CONTROL);
3755 
3756 	interface = PCI_INTERFACE(pa->pa_class);
3757 
3758 	for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
3759 		cp = &sc->pciide_channels[channel];
3760 		if (pciide_chansetup(sc, channel, interface) == 0)
3761 			continue;
3762 		if (channel == 1 &&
3763 		    (init_ctrl & OPTI_INIT_CONTROL_CH2_DISABLE) != 0) {
3764 			printf("%s: %s channel ignored (disabled)\n",
3765 			    sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
3766 			continue;
3767 		}
3768 		pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize,
3769 		    pciide_pci_intr);
3770 		if (cp->hw_ok == 0)
3771 			continue;
3772 		pciide_map_compat_intr(pa, cp, channel, interface);
3773 		if (cp->hw_ok == 0)
3774 			continue;
3775 		opti_setup_channel(&cp->wdc_channel);
3776 	}
3777 }
3778 
3779 void
3780 opti_setup_channel(chp)
3781 	struct channel_softc *chp;
3782 {
3783 	struct ata_drive_datas *drvp;
3784 	struct pciide_channel *cp = (struct pciide_channel*)chp;
3785 	struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
3786 	int drive, spd;
3787 	int mode[2];
3788 	u_int8_t rv, mr;
3789 
3790 	/*
3791 	 * The `Delay' and `Address Setup Time' fields of the
3792 	 * Miscellaneous Register are always zero initially.
3793 	 */
3794 	mr = opti_read_config(chp, OPTI_REG_MISC) & ~OPTI_MISC_INDEX_MASK;
3795 	mr &= ~(OPTI_MISC_DELAY_MASK |
3796 		OPTI_MISC_ADDR_SETUP_MASK |
3797 		OPTI_MISC_INDEX_MASK);
3798 
3799 	/* Prime the control register before setting timing values */
3800 	opti_write_config(chp, OPTI_REG_CONTROL, OPTI_CONTROL_DISABLE);
3801 
3802 	/* Determine the clockrate of the PCIbus the chip is attached to */
3803 	spd = (int) opti_read_config(chp, OPTI_REG_STRAP);
3804 	spd &= OPTI_STRAP_PCI_SPEED_MASK;
3805 
3806 	/* setup DMA if needed */
3807 	pciide_channel_dma_setup(cp);
3808 
3809 	for (drive = 0; drive < 2; drive++) {
3810 		drvp = &chp->ch_drive[drive];
3811 		/* If no drive, skip */
3812 		if ((drvp->drive_flags & DRIVE) == 0) {
3813 			mode[drive] = -1;
3814 			continue;
3815 		}
3816 
3817 		if ((drvp->drive_flags & DRIVE_DMA)) {
3818 			/*
3819 			 * Timings will be used for both PIO and DMA,
3820 			 * so adjust DMA mode if needed
3821 			 */
3822 			if (drvp->PIO_mode > (drvp->DMA_mode + 2))
3823 				drvp->PIO_mode = drvp->DMA_mode + 2;
3824 			if (drvp->DMA_mode + 2 > (drvp->PIO_mode))
3825 				drvp->DMA_mode = (drvp->PIO_mode > 2) ?
3826 				    drvp->PIO_mode - 2 : 0;
3827 			if (drvp->DMA_mode == 0)
3828 				drvp->PIO_mode = 0;
3829 
3830 			mode[drive] = drvp->DMA_mode + 5;
3831 		} else
3832 			mode[drive] = drvp->PIO_mode;
3833 
3834 		if (drive && mode[0] >= 0 &&
3835 		    (opti_tim_as[spd][mode[0]] != opti_tim_as[spd][mode[1]])) {
3836 			/*
3837 			 * Can't have two drives using different values
3838 			 * for `Address Setup Time'.
3839 			 * Slow down the faster drive to compensate.
3840 			 */
3841 			int d = (opti_tim_as[spd][mode[0]] >
3842 				 opti_tim_as[spd][mode[1]]) ?  0 : 1;
3843 
3844 			mode[d] = mode[1-d];
3845 			chp->ch_drive[d].PIO_mode = chp->ch_drive[1-d].PIO_mode;
3846 			chp->ch_drive[d].DMA_mode = 0;
3847 			chp->ch_drive[d].drive_flags &= DRIVE_DMA;
3848 		}
3849 	}
3850 
3851 	for (drive = 0; drive < 2; drive++) {
3852 		int m;
3853 		if ((m = mode[drive]) < 0)
3854 			continue;
3855 
3856 		/* Set the Address Setup Time and select appropriate index */
3857 		rv = opti_tim_as[spd][m] << OPTI_MISC_ADDR_SETUP_SHIFT;
3858 		rv |= OPTI_MISC_INDEX(drive);
3859 		opti_write_config(chp, OPTI_REG_MISC, mr | rv);
3860 
3861 		/* Set the pulse width and recovery timing parameters */
3862 		rv  = opti_tim_cp[spd][m] << OPTI_PULSE_WIDTH_SHIFT;
3863 		rv |= opti_tim_rt[spd][m] << OPTI_RECOVERY_TIME_SHIFT;
3864 		opti_write_config(chp, OPTI_REG_READ_CYCLE_TIMING, rv);
3865 		opti_write_config(chp, OPTI_REG_WRITE_CYCLE_TIMING, rv);
3866 
3867 		/* Set the Enhanced Mode register appropriately */
3868 	    	rv = pciide_pci_read(sc->sc_pc, sc->sc_tag, OPTI_REG_ENH_MODE);
3869 		rv &= ~OPTI_ENH_MODE_MASK(chp->channel, drive);
3870 		rv |= OPTI_ENH_MODE(chp->channel, drive, opti_tim_em[m]);
3871 		pciide_pci_write(sc->sc_pc, sc->sc_tag, OPTI_REG_ENH_MODE, rv);
3872 	}
3873 
3874 	/* Finally, enable the timings */
3875 	opti_write_config(chp, OPTI_REG_CONTROL, OPTI_CONTROL_ENABLE);
3876 
3877 	pciide_print_modes(cp);
3878 }
3879 
3880 #define	ACARD_IS_850(sc)						\
3881 	((sc)->sc_pp->ide_product == PCI_PRODUCT_ACARD_ATP850U)
3882 
3883 void
3884 acard_chip_map(sc, pa)
3885 	struct pciide_softc *sc;
3886 	struct pci_attach_args *pa;
3887 {
3888 	struct pciide_channel *cp;
3889 	int i;
3890 	pcireg_t interface;
3891 	bus_size_t cmdsize, ctlsize;
3892 
3893 	if (pciide_chipen(sc, pa) == 0)
3894 		return;
3895 
3896 	/*
3897 	 * when the chip is in native mode it identifies itself as a
3898 	 * 'misc mass storage'. Fake interface in this case.
3899 	 */
3900 	if (PCI_SUBCLASS(pa->pa_class) == PCI_SUBCLASS_MASS_STORAGE_IDE) {
3901 		interface = PCI_INTERFACE(pa->pa_class);
3902 	} else {
3903 		interface = PCIIDE_INTERFACE_BUS_MASTER_DMA |
3904 		    PCIIDE_INTERFACE_PCI(0) | PCIIDE_INTERFACE_PCI(1);
3905 	}
3906 
3907 	printf("%s: bus-master DMA support present",
3908 	    sc->sc_wdcdev.sc_dev.dv_xname);
3909 	pciide_mapreg_dma(sc, pa);
3910 	printf("\n");
3911 	sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
3912 	    WDC_CAPABILITY_MODE;
3913 
3914 	if (sc->sc_dma_ok) {
3915 		sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_UDMA;
3916 		sc->sc_wdcdev.cap |= WDC_CAPABILITY_IRQACK;
3917 		sc->sc_wdcdev.irqack = pciide_irqack;
3918 	}
3919 	sc->sc_wdcdev.PIO_cap = 4;
3920 	sc->sc_wdcdev.DMA_cap = 2;
3921 	sc->sc_wdcdev.UDMA_cap = ACARD_IS_850(sc) ? 2 : 4;
3922 
3923 	sc->sc_wdcdev.set_modes = acard_setup_channel;
3924 	sc->sc_wdcdev.channels = sc->wdc_chanarray;
3925 	sc->sc_wdcdev.nchannels = 2;
3926 
3927 	for (i = 0; i < sc->sc_wdcdev.nchannels; i++) {
3928 		cp = &sc->pciide_channels[i];
3929 		if (pciide_chansetup(sc, i, interface) == 0)
3930 			continue;
3931 		if (interface & PCIIDE_INTERFACE_PCI(i)) {
3932 			cp->hw_ok = pciide_mapregs_native(pa, cp, &cmdsize,
3933 			    &ctlsize, pciide_pci_intr);
3934 		} else {
3935 			cp->hw_ok = pciide_mapregs_compat(pa, cp, i,
3936 			    &cmdsize, &ctlsize);
3937 		}
3938 		if (cp->hw_ok == 0)
3939 			return;
3940 		cp->wdc_channel.data32iot = cp->wdc_channel.cmd_iot;
3941 		cp->wdc_channel.data32ioh = cp->wdc_channel.cmd_ioh;
3942 		wdcattach(&cp->wdc_channel);
3943 		acard_setup_channel(&cp->wdc_channel);
3944 	}
3945 	if (!ACARD_IS_850(sc)) {
3946 		u_int32_t reg;
3947 		reg = pci_conf_read(sc->sc_pc, sc->sc_tag, ATP8x0_CTRL);
3948 		reg &= ~ATP860_CTRL_INT;
3949 		pci_conf_write(sc->sc_pc, sc->sc_tag, ATP8x0_CTRL, reg);
3950 	}
3951 }
3952 
3953 void
3954 acard_setup_channel(chp)
3955 	struct channel_softc *chp;
3956 {
3957 	struct ata_drive_datas *drvp;
3958 	struct pciide_channel *cp = (struct pciide_channel*)chp;
3959 	struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
3960 	int channel = chp->channel;
3961 	int drive;
3962 	u_int32_t idetime, udma_mode;
3963 	u_int32_t idedma_ctl;
3964 
3965 	/* setup DMA if needed */
3966 	pciide_channel_dma_setup(cp);
3967 
3968 	if (ACARD_IS_850(sc)) {
3969 		idetime = 0;
3970 		udma_mode = pci_conf_read(sc->sc_pc, sc->sc_tag, ATP850_UDMA);
3971 		udma_mode &= ~ATP850_UDMA_MASK(channel);
3972 	} else {
3973 		idetime = pci_conf_read(sc->sc_pc, sc->sc_tag, ATP860_IDETIME);
3974 		idetime &= ~ATP860_SETTIME_MASK(channel);
3975 		udma_mode = pci_conf_read(sc->sc_pc, sc->sc_tag, ATP860_UDMA);
3976 		udma_mode &= ~ATP860_UDMA_MASK(channel);
3977 
3978 		/* check 80 pins cable */
3979 		if ((chp->ch_drive[0].drive_flags & DRIVE_UDMA) ||
3980 		    (chp->ch_drive[1].drive_flags & DRIVE_UDMA)) {
3981 			if (pci_conf_read(sc->sc_pc, sc->sc_tag, ATP8x0_CTRL)
3982 			    & ATP860_CTRL_80P(chp->channel)) {
3983 				if (chp->ch_drive[0].UDMA_mode > 2)
3984 					chp->ch_drive[0].UDMA_mode = 2;
3985 				if (chp->ch_drive[1].UDMA_mode > 2)
3986 					chp->ch_drive[1].UDMA_mode = 2;
3987 			}
3988 		}
3989 	}
3990 
3991 	idedma_ctl = 0;
3992 
3993 	/* Per drive settings */
3994 	for (drive = 0; drive < 2; drive++) {
3995 		drvp = &chp->ch_drive[drive];
3996 		/* If no drive, skip */
3997 		if ((drvp->drive_flags & DRIVE) == 0)
3998 			continue;
3999 		/* add timing values, setup DMA if needed */
4000 		if ((chp->wdc->cap & WDC_CAPABILITY_UDMA) &&
4001 		    (drvp->drive_flags & DRIVE_UDMA)) {
4002 			/* use Ultra/DMA */
4003 			if (ACARD_IS_850(sc)) {
4004 				idetime |= ATP850_SETTIME(drive,
4005 				    acard_act_udma[drvp->UDMA_mode],
4006 				    acard_rec_udma[drvp->UDMA_mode]);
4007 				udma_mode |= ATP850_UDMA_MODE(channel, drive,
4008 				    acard_udma_conf[drvp->UDMA_mode]);
4009 			} else {
4010 				idetime |= ATP860_SETTIME(channel, drive,
4011 				    acard_act_udma[drvp->UDMA_mode],
4012 				    acard_rec_udma[drvp->UDMA_mode]);
4013 				udma_mode |= ATP860_UDMA_MODE(channel, drive,
4014 				    acard_udma_conf[drvp->UDMA_mode]);
4015 			}
4016 			idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
4017 		} else if ((chp->wdc->cap & WDC_CAPABILITY_DMA) &&
4018 		    (drvp->drive_flags & DRIVE_DMA)) {
4019 			/* use Multiword DMA */
4020 			drvp->drive_flags &= ~DRIVE_UDMA;
4021 			if (ACARD_IS_850(sc)) {
4022 				idetime |= ATP850_SETTIME(drive,
4023 				    acard_act_dma[drvp->DMA_mode],
4024 				    acard_rec_dma[drvp->DMA_mode]);
4025 			} else {
4026 				idetime |= ATP860_SETTIME(channel, drive,
4027 				    acard_act_dma[drvp->DMA_mode],
4028 				    acard_rec_dma[drvp->DMA_mode]);
4029 			}
4030 			idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
4031 		} else {
4032 			/* PIO only */
4033 			drvp->drive_flags &= ~(DRIVE_UDMA | DRIVE_DMA);
4034 			if (ACARD_IS_850(sc)) {
4035 				idetime |= ATP850_SETTIME(drive,
4036 				    acard_act_pio[drvp->PIO_mode],
4037 				    acard_rec_pio[drvp->PIO_mode]);
4038 			} else {
4039 				idetime |= ATP860_SETTIME(channel, drive,
4040 				    acard_act_pio[drvp->PIO_mode],
4041 				    acard_rec_pio[drvp->PIO_mode]);
4042 			}
4043 		pci_conf_write(sc->sc_pc, sc->sc_tag, ATP8x0_CTRL,
4044 		    pci_conf_read(sc->sc_pc, sc->sc_tag, ATP8x0_CTRL)
4045 		    | ATP8x0_CTRL_EN(channel));
4046 		}
4047 	}
4048 
4049 	if (idedma_ctl != 0) {
4050 		/* Add software bits in status register */
4051 		bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
4052 		    IDEDMA_CTL + IDEDMA_SCH_OFFSET * channel, idedma_ctl);
4053 	}
4054 	pciide_print_modes(cp);
4055 
4056 	if (ACARD_IS_850(sc)) {
4057 		pci_conf_write(sc->sc_pc, sc->sc_tag,
4058 		    ATP850_IDETIME(channel), idetime);
4059 		pci_conf_write(sc->sc_pc, sc->sc_tag, ATP850_UDMA, udma_mode);
4060 	} else {
4061 		pci_conf_write(sc->sc_pc, sc->sc_tag, ATP860_IDETIME, idetime);
4062 		pci_conf_write(sc->sc_pc, sc->sc_tag, ATP860_UDMA, udma_mode);
4063 	}
4064 }
4065 
4066 int
4067 acard_pci_intr(arg)
4068 	void *arg;
4069 {
4070 	struct pciide_softc *sc = arg;
4071 	struct pciide_channel *cp;
4072 	struct channel_softc *wdc_cp;
4073 	int rv = 0;
4074 	int dmastat, i, crv;
4075 
4076 	for (i = 0; i < sc->sc_wdcdev.nchannels; i++) {
4077 		dmastat = bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh,
4078 		    IDEDMA_CTL + IDEDMA_SCH_OFFSET * i);
4079 		if ((dmastat & IDEDMA_CTL_INTR) == 0)
4080 			continue;
4081 		cp = &sc->pciide_channels[i];
4082 		wdc_cp = &cp->wdc_channel;
4083 		if ((wdc_cp->ch_flags & WDCF_IRQ_WAIT) == 0) {
4084 			(void)wdcintr(wdc_cp);
4085 			bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
4086 			    IDEDMA_CTL + IDEDMA_SCH_OFFSET * i, dmastat);
4087 			continue;
4088 		}
4089 		crv = wdcintr(wdc_cp);
4090 		if (crv == 0)
4091 			printf("%s:%d: bogus intr\n",
4092 			    sc->sc_wdcdev.sc_dev.dv_xname, i);
4093 		else if (crv == 1)
4094 			rv = 1;
4095 		else if (rv == 0)
4096 			rv = crv;
4097 	}
4098 	return rv;
4099 }
4100