xref: /openbsd-src/sys/dev/pci/pciide.c (revision 8445c53715e7030056b779e8ab40efb7820981f2)
1 /*	$OpenBSD: pciide.c,v 1.67 2001/09/11 20:05:25 miod Exp $	*/
2 /*	$NetBSD: pciide.c,v 1.127 2001/08/03 01:31:08 tsutsui Exp $	*/
3 
4 /*
5  * Copyright (c) 1999, 2000, 2001 Manuel Bouyer.
6  *
7  * Redistribution and use in source and binary forms, with or without
8  * modification, are permitted provided that the following conditions
9  * are met:
10  * 1. Redistributions of source code must retain the above copyright
11  *    notice, this list of conditions and the following disclaimer.
12  * 2. Redistributions in binary form must reproduce the above copyright
13  *    notice, this list of conditions and the following disclaimer in the
14  *    documentation and/or other materials provided with the distribution.
15  * 3. All advertising materials mentioning features or use of this software
16  *    must display the following acknowledgement:
17  *	This product includes software developed by the University of
18  *	California, Berkeley and its contributors.
19  * 4. Neither the name of the University nor the names of its contributors
20  *    may be used to endorse or promote products derived from this software
21  *    without specific prior written permission.
22  *
23  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
24  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
25  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
26  * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
27  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
28  * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
29  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
30  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
31  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
32  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
33  *
34  */
35 
36 /*
37  * Copyright (c) 1996, 1998 Christopher G. Demetriou.  All rights reserved.
38  *
39  * Redistribution and use in source and binary forms, with or without
40  * modification, are permitted provided that the following conditions
41  * are met:
42  * 1. Redistributions of source code must retain the above copyright
43  *    notice, this list of conditions and the following disclaimer.
44  * 2. Redistributions in binary form must reproduce the above copyright
45  *    notice, this list of conditions and the following disclaimer in the
46  *    documentation and/or other materials provided with the distribution.
47  * 3. All advertising materials mentioning features or use of this software
48  *    must display the following acknowledgement:
49  *      This product includes software developed by Christopher G. Demetriou
50  *	for the NetBSD Project.
51  * 4. The name of the author may not be used to endorse or promote products
52  *    derived from this software without specific prior written permission
53  *
54  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
55  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
56  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
57  * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
58  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
59  * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
60  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
61  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
62  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
63  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
64  */
65 
66 /*
67  * PCI IDE controller driver.
68  *
69  * Author: Christopher G. Demetriou, March 2, 1998 (derived from NetBSD
70  * sys/dev/pci/ppb.c, revision 1.16).
71  *
72  * See "PCI IDE Controller Specification, Revision 1.0 3/4/94" and
73  * "Programming Interface for Bus Master IDE Controller, Revision 1.0
74  * 5/16/94" from the PCI SIG.
75  *
76  */
77 
78 #define DEBUG_DMA   0x01
79 #define DEBUG_XFERS  0x02
80 #define DEBUG_FUNCS  0x08
81 #define DEBUG_PROBE  0x10
82 
83 #ifdef WDCDEBUG
84 int wdcdebug_pciide_mask = 0;
85 #define WDCDEBUG_PRINT(args, level) \
86 	if (wdcdebug_pciide_mask & (level)) printf args
87 #else
88 #define WDCDEBUG_PRINT(args, level)
89 #endif
90 #include <sys/param.h>
91 #include <sys/systm.h>
92 #include <sys/device.h>
93 #include <sys/malloc.h>
94 
95 #include <vm/vm.h>
96 
97 #include <machine/endian.h>
98 
99 #include <dev/pci/pcireg.h>
100 #include <dev/pci/pcivar.h>
101 #include <dev/pci/pcidevs.h>
102 #include <dev/pci/pciidereg.h>
103 #include <dev/pci/pciidevar.h>
104 #include <dev/pci/pciide_piix_reg.h>
105 #include <dev/pci/pciide_amd_reg.h>
106 #include <dev/pci/pciide_apollo_reg.h>
107 #include <dev/pci/pciide_cmd_reg.h>
108 #include <dev/pci/pciide_cy693_reg.h>
109 #include <dev/pci/pciide_sis_reg.h>
110 #include <dev/pci/pciide_acer_reg.h>
111 #include <dev/pci/pciide_pdc202xx_reg.h>
112 #include <dev/pci/pciide_opti_reg.h>
113 #include <dev/pci/pciide_hpt_reg.h>
114 #include <dev/pci/pciide_acard_reg.h>
115 
116 #include <dev/pci/cy82c693var.h>
117 
118 #include <dev/ata/atavar.h>
119 #include <dev/ic/wdcreg.h>
120 #include <dev/ic/wdcvar.h>
121 
122 /* inlines for reading/writing 8-bit PCI registers */
123 static __inline u_int8_t pciide_pci_read __P((pci_chipset_tag_t, pcitag_t,
124 					      int));
125 static __inline void pciide_pci_write __P((pci_chipset_tag_t, pcitag_t,
126 					   int, u_int8_t));
127 
128 static __inline u_int8_t
129 pciide_pci_read(pc, pa, reg)
130 	pci_chipset_tag_t pc;
131 	pcitag_t pa;
132 	int reg;
133 {
134 
135 	return (pci_conf_read(pc, pa, (reg & ~0x03)) >>
136 	    ((reg & 0x03) * 8) & 0xff);
137 }
138 
139 static __inline void
140 pciide_pci_write(pc, pa, reg, val)
141 	pci_chipset_tag_t pc;
142 	pcitag_t pa;
143 	int reg;
144 	u_int8_t val;
145 {
146 	pcireg_t pcival;
147 
148 	pcival = pci_conf_read(pc, pa, (reg & ~0x03));
149 	pcival &= ~(0xff << ((reg & 0x03) * 8));
150 	pcival |= (val << ((reg & 0x03) * 8));
151 	pci_conf_write(pc, pa, (reg & ~0x03), pcival);
152 }
153 
154 struct pciide_softc {
155 	struct wdc_softc	sc_wdcdev;	/* common wdc definitions */
156 	pci_chipset_tag_t	sc_pc;		/* PCI registers info */
157 	pcitag_t		sc_tag;
158 	void			*sc_pci_ih;	/* PCI interrupt handle */
159 	int			sc_dma_ok;	/* bus-master DMA info */
160 	bus_space_tag_t		sc_dma_iot;
161 	bus_space_handle_t	sc_dma_ioh;
162 	bus_dma_tag_t		sc_dmat;
163 
164 	/* For Cypress */
165 	const struct cy82c693_handle *sc_cy_handle;
166 	int sc_cy_compatchan;
167 
168 	/* Chip description */
169 	const struct pciide_product_desc *sc_pp;
170 	/* common definitions */
171 	struct channel_softc *wdc_chanarray[PCIIDE_NUM_CHANNELS];
172 	/* internal bookkeeping */
173 	struct pciide_channel {			/* per-channel data */
174 		struct channel_softc wdc_channel; /* generic part */
175 		char		*name;
176 		int		hw_ok;		/* hardware mapped & OK? */
177 		int		compat;		/* is it compat? */
178 		int             dma_in_progress;
179 		void		*ih;		/* compat or pci handle */
180 		bus_space_handle_t ctl_baseioh;	/* ctrl regs blk, native mode */
181 		/* DMA tables and DMA map for xfer, for each drive */
182 		struct pciide_dma_maps {
183 			bus_dmamap_t    dmamap_table;
184 			struct idedma_table *dma_table;
185 			bus_dmamap_t    dmamap_xfer;
186 			int dma_flags;
187 		} dma_maps[2];
188 	} pciide_channels[PCIIDE_NUM_CHANNELS];
189 };
190 
191 void default_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
192 
193 void piix_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
194 void piix_setup_channel __P((struct channel_softc*));
195 void piix3_4_setup_channel __P((struct channel_softc*));
196 
197 static u_int32_t piix_setup_idetim_timings __P((u_int8_t, u_int8_t, u_int8_t));
198 static u_int32_t piix_setup_idetim_drvs __P((struct ata_drive_datas*));
199 static u_int32_t piix_setup_sidetim_timings __P((u_int8_t, u_int8_t, u_int8_t));
200 
201 void amd756_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
202 void amd756_setup_channel __P((struct channel_softc*));
203 
204 void apollo_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
205 void apollo_setup_channel __P((struct channel_softc*));
206 
207 void cmd_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
208 void cmd0643_9_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
209 void cmd0643_9_setup_channel __P((struct channel_softc*));
210 void cmd_channel_map __P((struct pci_attach_args *,
211 			struct pciide_softc *, int));
212 int  cmd_pci_intr __P((void *));
213 void cmd646_9_irqack __P((struct channel_softc *));
214 
215 void cy693_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
216 void cy693_setup_channel __P((struct channel_softc*));
217 
218 void sis_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
219 void sis_setup_channel __P((struct channel_softc*));
220 
221 void acer_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
222 void acer_setup_channel __P((struct channel_softc*));
223 int  acer_pci_intr __P((void *));
224 
225 void pdc202xx_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
226 void pdc202xx_setup_channel __P((struct channel_softc*));
227 int  pdc202xx_pci_intr __P((void *));
228 int  pdc20265_pci_intr __P((void *));
229 
230 void opti_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
231 void opti_setup_channel __P((struct channel_softc*));
232 
233 void hpt_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
234 void hpt_setup_channel __P((struct channel_softc*));
235 int  hpt_pci_intr __P((void *));
236 
237 void acard_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
238 void acard_setup_channel __P((struct channel_softc*));
239 int  acard_pci_intr __P((void *));
240 
241 void pciide_channel_dma_setup __P((struct pciide_channel *));
242 int  pciide_dma_table_setup __P((struct pciide_softc*, int, int));
243 int  pciide_dma_init __P((void*, int, int, void *, size_t, int));
244 void pciide_dma_start __P((void*, int, int));
245 int  pciide_dma_finish __P((void*, int, int));
246 void pciide_irqack __P((struct channel_softc *));
247 void pciide_print_modes __P((struct pciide_channel *));
248 void pciide_print_channels __P((int, pcireg_t));;
249 
250 struct pciide_product_desc {
251 	u_int32_t ide_product;
252 	u_short ide_flags;
253 	/* map and setup chip, probe drives */
254 	void (*chip_map) __P((struct pciide_softc*, struct pci_attach_args*));
255 };
256 
257 /* Flags for ide_flags */
258 #define IDE_PCI_CLASS_OVERRIDE	0x0001	/* accept even if class != pciide */
259 #define IDE_16BIT_IOSPACE	0x0002	/* I/O space BARS ignore upper word */
260 
261 /* Default product description for devices not known from this controller */
262 const struct pciide_product_desc default_product_desc = {
263 	0,				/* Generic PCI IDE controller */
264 	0,
265 	default_chip_map
266 };
267 
268 const struct pciide_product_desc pciide_intel_products[] =  {
269 	{ PCI_PRODUCT_INTEL_82092AA,	/* Intel 82092AA IDE */
270 	  0,
271 	  default_chip_map
272 	},
273 	{ PCI_PRODUCT_INTEL_82371FB_IDE, /* Intel 82371FB IDE (PIIX) */
274 	  0,
275 	  piix_chip_map
276 	},
277 	{ PCI_PRODUCT_INTEL_82371SB_IDE, /* Intel 82371SB IDE (PIIX3) */
278 	  0,
279 	  piix_chip_map
280 	},
281 	{ PCI_PRODUCT_INTEL_82371AB_IDE, /* Intel 82371AB IDE (PIIX4) */
282 	  0,
283 	  piix_chip_map
284 	},
285 	{ PCI_PRODUCT_INTEL_82440MX_IDE, /* Intel 82440MX IDE */
286 	  0,
287 	  piix_chip_map
288 	},
289 	{ PCI_PRODUCT_INTEL_82801AA_IDE, /* Intel 82801AA IDE (ICH) */
290 	  0,
291 	  piix_chip_map
292 	},
293 	{ PCI_PRODUCT_INTEL_82801AB_IDE, /* Intel 82801AB IDE (ICH0) */
294 	  0,
295 	  piix_chip_map
296 	},
297 	{ PCI_PRODUCT_INTEL_82801BAM_IDE, /* Intel 82801BAM IDE (ICH2) */
298 	  0,
299 	  piix_chip_map
300 	},
301 	{ PCI_PRODUCT_INTEL_82801BA_IDE, /* Intel 82801BA IDE (ICH2) */
302 	  0,
303 	  piix_chip_map
304 	},
305 };
306 
307 const struct pciide_product_desc pciide_amd_products[] =  {
308 	{ PCI_PRODUCT_AMD_PBC756_IDE,	/* AMD 756 */
309 	  0,
310 	  amd756_chip_map
311 	},
312 	{ PCI_PRODUCT_AMD_766_IDE, /* AMD 766 */
313 	  0,
314 	  amd756_chip_map
315 	},
316 };
317 
318 #ifdef notyet
319 const struct pciide_product_desc pciide_opti_products[] = {
320 
321 	{ PCI_PRODUCT_OPTI_82C621,
322 	  0,
323 	  opti_chip_map
324 	},
325 	{ PCI_PRODUCT_OPTI_82C568,
326 	  0,
327 	  opti_chip_map
328 	},
329 	{ PCI_PRODUCT_OPTI_82D568,
330 	  0,
331 	  opti_chip_map
332 	},
333 };
334 #endif
335 
336 const struct pciide_product_desc pciide_cmd_products[] =  {
337 	{ PCI_PRODUCT_CMDTECH_640,	/* CMD Technology PCI0640 */
338 	  0,
339 	  cmd_chip_map
340 	},
341 	{ PCI_PRODUCT_CMDTECH_643,	/* CMD Technology PCI0643 */
342 	  0,
343 	  cmd0643_9_chip_map
344 	},
345 	{ PCI_PRODUCT_CMDTECH_646,	/* CMD Technology PCI0646 */
346 	  0,
347 	  cmd0643_9_chip_map
348 	},
349 	{ PCI_PRODUCT_CMDTECH_648,	/* CMD Technology PCI0648 */
350 	  IDE_PCI_CLASS_OVERRIDE,
351 	  cmd0643_9_chip_map
352 	},
353 	{ PCI_PRODUCT_CMDTECH_649,	/* CMD Technology PCI0649 */
354 	  IDE_PCI_CLASS_OVERRIDE,
355 	  cmd0643_9_chip_map
356 	}
357 };
358 
359 const struct pciide_product_desc pciide_via_products[] =  {
360 	{ PCI_PRODUCT_VIATECH_VT82C416, /* VIA VT82C416 IDE */
361 	  0,
362 	  apollo_chip_map
363 	 },
364 	{ PCI_PRODUCT_VIATECH_VT82C571, /* VIA VT82C571 IDE */
365 	  0,
366 	  apollo_chip_map
367 	}
368 };
369 
370 const struct pciide_product_desc pciide_cypress_products[] =  {
371 	{ PCI_PRODUCT_CONTAQ_82C693,	/* Contaq CY82C693 IDE */
372 	  IDE_16BIT_IOSPACE,
373 	  cy693_chip_map
374 	}
375 };
376 
377 const struct pciide_product_desc pciide_sis_products[] =  {
378 	{ PCI_PRODUCT_SIS_5513,		/* SIS 5513 EIDE */
379 	  0,
380 	  sis_chip_map
381 	}
382 };
383 
384 const struct pciide_product_desc pciide_acer_products[] =  {
385 	{ PCI_PRODUCT_ALI_M5229,	/* Acer Labs M5229 UDMA IDE */
386 	  0,
387 	  acer_chip_map
388 	}
389 };
390 
391 const struct pciide_product_desc pciide_triones_products[] =  {
392 	{ PCI_PRODUCT_TRIONES_HPT366,	/* Highpoint HPT36x/37x IDE */
393 	  IDE_PCI_CLASS_OVERRIDE,
394 	  hpt_chip_map,
395 	}
396 };
397 
398 const struct pciide_product_desc pciide_promise_products[] =  {
399 	{ PCI_PRODUCT_PROMISE_PDC20246,
400 	IDE_PCI_CLASS_OVERRIDE,
401 	pdc202xx_chip_map,
402 	},
403 	{ PCI_PRODUCT_PROMISE_PDC20262,
404 	IDE_PCI_CLASS_OVERRIDE,
405 	pdc202xx_chip_map,
406 	},
407 	{ PCI_PRODUCT_PROMISE_PDC20265,
408 	IDE_PCI_CLASS_OVERRIDE,
409 	pdc202xx_chip_map,
410 	},
411 	{ PCI_PRODUCT_PROMISE_PDC20267,
412 	IDE_PCI_CLASS_OVERRIDE,
413 	pdc202xx_chip_map,
414 	}
415 };
416 
417 const struct pciide_product_desc pciide_acard_products[] =  {
418 	{ PCI_PRODUCT_ACARD_ATP850U,	/* Acard ATP850U Ultra33 Controller */
419 	 IDE_PCI_CLASS_OVERRIDE,
420 	 acard_chip_map,
421 	 },
422 	{ PCI_PRODUCT_ACARD_ATP860,	/* Acard ATP860 Ultra66 Controller */
423 	 IDE_PCI_CLASS_OVERRIDE,
424 	 acard_chip_map,
425 	},
426 	{ PCI_PRODUCT_ACARD_ATP860A,	/* Acard ATP860-A Ultra66 Controller */
427 	 IDE_PCI_CLASS_OVERRIDE,
428 	 acard_chip_map,
429 	}
430 };
431 
432 struct pciide_vendor_desc {
433 	u_int32_t ide_vendor;
434 	const struct pciide_product_desc *ide_products;
435 	int ide_nproducts;
436 };
437 
438 const struct pciide_vendor_desc pciide_vendors[] = {
439 	{ PCI_VENDOR_INTEL, pciide_intel_products,
440 	  sizeof(pciide_intel_products)/sizeof(pciide_intel_products[0]) },
441 	{ PCI_VENDOR_AMD, pciide_amd_products,
442 	  sizeof(pciide_amd_products)/sizeof(pciide_amd_products[0]) },
443 #ifdef notyet
444 	{ PCI_VENDOR_OPTI, pciide_opti_products,
445 	  sizeof(pciide_opti_products)/sizeof(pciide_opti_products[0]) },
446 #endif
447 	{ PCI_VENDOR_CMDTECH, pciide_cmd_products,
448 	  sizeof(pciide_cmd_products)/sizeof(pciide_cmd_products[0]) },
449 	{ PCI_VENDOR_VIATECH, pciide_via_products,
450 	  sizeof(pciide_via_products)/sizeof(pciide_via_products[0]) },
451 	{ PCI_VENDOR_CONTAQ, pciide_cypress_products,
452 	  sizeof(pciide_cypress_products)/sizeof(pciide_cypress_products[0]) },
453 	{ PCI_VENDOR_SIS, pciide_sis_products,
454 	  sizeof(pciide_sis_products)/sizeof(pciide_sis_products[0]) },
455 	{ PCI_VENDOR_ALI, pciide_acer_products,
456 	  sizeof(pciide_acer_products)/sizeof(pciide_acer_products[0]) },
457 	{ PCI_VENDOR_TRIONES, pciide_triones_products,
458 	  sizeof(pciide_triones_products)/sizeof(pciide_triones_products[0]) },
459 	{ PCI_VENDOR_ACARD, pciide_acard_products,
460 	  sizeof(pciide_acard_products)/sizeof(pciide_acard_products[0]) },
461 	{ PCI_VENDOR_PROMISE, pciide_promise_products,
462 	  sizeof(pciide_promise_products)/sizeof(pciide_promise_products[0]) }
463 };
464 
465 /* options passed via the 'flags' config keyword */
466 #define PCIIDE_OPTIONS_DMA	0x01
467 
468 #ifndef __OpenBSD__
469 int	pciide_match __P((struct device *, struct cfdata *, void *));
470 #else
471 int	pciide_match __P((struct device *, void *, void *));
472 #endif
473 void	pciide_attach __P((struct device *, struct device *, void *));
474 
475 struct cfattach pciide_ca = {
476 	sizeof(struct pciide_softc), pciide_match, pciide_attach
477 };
478 
479 #ifdef __OpenBSD__
480 struct        cfdriver pciide_cd = {
481       NULL, "pciide", DV_DULL
482 };
483 #endif
484 int	pciide_chipen __P((struct pciide_softc *, struct pci_attach_args *));
485 int	pciide_mapregs_compat __P(( struct pci_attach_args *,
486 	    struct pciide_channel *, int, bus_size_t *, bus_size_t*));
487 int	pciide_mapregs_native __P((struct pci_attach_args *,
488 	    struct pciide_channel *, bus_size_t *, bus_size_t *,
489 	    int (*pci_intr) __P((void *))));
490 void	pciide_mapreg_dma __P((struct pciide_softc *,
491 	    struct pci_attach_args *));
492 int	pciide_chansetup __P((struct pciide_softc *, int, pcireg_t));
493 void	pciide_mapchan __P((struct pci_attach_args *,
494 	    struct pciide_channel *, pcireg_t, bus_size_t *, bus_size_t *,
495 	    int (*pci_intr) __P((void *))));
496 int	pciide_chan_candisable __P((struct pciide_channel *));
497 void	pciide_map_compat_intr __P(( struct pci_attach_args *,
498 	    struct pciide_channel *, int, int));
499 void	pciide_unmap_compat_intr __P(( struct pci_attach_args *,
500 	    struct pciide_channel *, int, int));
501 int	pciide_compat_intr __P((void *));
502 int	pciide_pci_intr __P((void *));
503 int     pciide_intr_flag(struct pciide_channel *);
504 
505 const struct pciide_product_desc* pciide_lookup_product __P((u_int32_t));
506 
507 const struct pciide_product_desc *
508 pciide_lookup_product(id)
509 	u_int32_t id;
510 {
511 	const struct pciide_product_desc *pp;
512 	const struct pciide_vendor_desc *vp;
513 	int i;
514 
515 	for (i = 0, vp = pciide_vendors;
516 	    i < sizeof(pciide_vendors)/sizeof(pciide_vendors[0]);
517 	    vp++, i++)
518 		if (PCI_VENDOR(id) == vp->ide_vendor)
519 			break;
520 
521 	if (i == sizeof(pciide_vendors)/sizeof(pciide_vendors[0]))
522 		return NULL;
523 
524 	for (pp = vp->ide_products, i = 0; i < vp->ide_nproducts; pp++, i++)
525 		if (PCI_PRODUCT(id) == pp->ide_product)
526 			break;
527 
528 	if (i == vp->ide_nproducts)
529 		return NULL;
530 	return pp;
531 }
532 
533 int
534 pciide_match(parent, match, aux)
535 	struct device *parent;
536 #ifdef __OpenBSD__
537 	void *match;
538 #else
539 	struct cfdata *match;
540 #endif
541 	void *aux;
542 {
543 	struct pci_attach_args *pa = aux;
544 	const struct pciide_product_desc *pp;
545 
546 	/*
547  	 * Some IDE controllers have severe bugs when used in PCI mode.
548 	 * We punt and attach them to the ISA bus instead.
549 	 */
550 	if (PCI_VENDOR(pa->pa_id) == PCI_VENDOR_PCTECH &&
551 	    PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_PCTECH_RZ1000)
552 		return (0);
553 
554 	/*
555 	 * Check the ID register to see that it's a PCI IDE controller.
556 	 * If it is, we assume that we can deal with it; it _should_
557 	 * work in a standardized way...
558 	 */
559 	if (PCI_CLASS(pa->pa_class) == PCI_CLASS_MASS_STORAGE &&
560 	    PCI_SUBCLASS(pa->pa_class) == PCI_SUBCLASS_MASS_STORAGE_IDE) {
561 		return (1);
562 	}
563 
564 	/*
565  	 * Some controllers (e.g. promise Ultra-33) don't claim to be PCI IDE
566 	 * controllers. Let see if we can deal with it anyway.
567 	 */
568 	pp = pciide_lookup_product(pa->pa_id);
569 	if (pp  && (pp->ide_flags & IDE_PCI_CLASS_OVERRIDE)) {
570 		return (1);
571 	}
572 
573 	return (0);
574 }
575 
576 void
577 pciide_attach(parent, self, aux)
578 	struct device *parent, *self;
579 	void *aux;
580 {
581 	struct pci_attach_args *pa = aux;
582 	pci_chipset_tag_t pc = pa->pa_pc;
583 	pcitag_t tag = pa->pa_tag;
584 	struct pciide_softc *sc = (struct pciide_softc *)self;
585 	pcireg_t csr;
586 	char devinfo[256];
587 
588 	sc->sc_pp = pciide_lookup_product(pa->pa_id);
589 	if (sc->sc_pp == NULL) {
590 		sc->sc_pp = &default_product_desc;
591 		pci_devinfo(pa->pa_id, pa->pa_class, 0, devinfo);
592 	}
593 
594 	sc->sc_pc = pa->pa_pc;
595 	sc->sc_tag = pa->pa_tag;
596 
597 #ifdef WDCDEBUG
598        if (wdcdebug_pciide_mask & DEBUG_PROBE)
599                printf(" sc_pc %p, sc_tag %p\n", sc->sc_pc, sc->sc_tag);
600 #endif
601 
602 	sc->sc_pp->chip_map(sc, pa);
603 
604 	if (sc->sc_dma_ok) {
605 		csr = pci_conf_read(pc, tag, PCI_COMMAND_STATUS_REG);
606 		csr |= PCI_COMMAND_MASTER_ENABLE;
607 		pci_conf_write(pc, tag, PCI_COMMAND_STATUS_REG, csr);
608 	}
609 
610 	WDCDEBUG_PRINT(("pciide: command/status register=0x%x\n",
611 	    pci_conf_read(pc, tag, PCI_COMMAND_STATUS_REG)), DEBUG_PROBE);
612 }
613 
614 /* tell wether the chip is enabled or not */
615 int
616 pciide_chipen(sc, pa)
617 	struct pciide_softc *sc;
618 	struct pci_attach_args *pa;
619 {
620 	pcireg_t csr;
621 	if ((pa->pa_flags & PCI_FLAGS_IO_ENABLED) == 0) {
622 		csr = pci_conf_read(sc->sc_pc, sc->sc_tag,
623 		    PCI_COMMAND_STATUS_REG);
624 		printf("%s: device disabled (at %s)\n",
625 	 	   sc->sc_wdcdev.sc_dev.dv_xname,
626 	  	  (csr & PCI_COMMAND_IO_ENABLE) == 0 ?
627 		  "device" : "bridge");
628 		return 0;
629 	}
630 	return 1;
631 }
632 
633 int
634 pciide_mapregs_compat(pa, cp, compatchan, cmdsizep, ctlsizep)
635 	struct pci_attach_args *pa;
636 	struct pciide_channel *cp;
637 	int compatchan;
638 	bus_size_t *cmdsizep, *ctlsizep;
639 {
640 	struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
641 	struct channel_softc *wdc_cp = &cp->wdc_channel;
642 
643 	cp->compat = 1;
644 	*cmdsizep = PCIIDE_COMPAT_CMD_SIZE;
645 	*ctlsizep = PCIIDE_COMPAT_CTL_SIZE;
646 
647 	wdc_cp->cmd_iot = pa->pa_iot;
648 
649 	if (bus_space_map(wdc_cp->cmd_iot, PCIIDE_COMPAT_CMD_BASE(compatchan),
650 	    PCIIDE_COMPAT_CMD_SIZE, 0, &wdc_cp->cmd_ioh) != 0) {
651 		printf("%s: couldn't map %s cmd regs\n",
652 		    sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
653 		return (0);
654 	}
655 
656 	wdc_cp->ctl_iot = pa->pa_iot;
657 
658 	if (bus_space_map(wdc_cp->ctl_iot, PCIIDE_COMPAT_CTL_BASE(compatchan),
659 	    PCIIDE_COMPAT_CTL_SIZE, 0, &wdc_cp->ctl_ioh) != 0) {
660 		printf("%s: couldn't map %s ctl regs\n",
661 		    sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
662 		bus_space_unmap(wdc_cp->cmd_iot, wdc_cp->cmd_ioh,
663 		    PCIIDE_COMPAT_CMD_SIZE);
664 		return (0);
665 	}
666 
667 	return (1);
668 }
669 
670 int
671 pciide_mapregs_native(pa, cp, cmdsizep, ctlsizep, pci_intr)
672 	struct pci_attach_args * pa;
673 	struct pciide_channel *cp;
674 	bus_size_t *cmdsizep, *ctlsizep;
675 	int (*pci_intr) __P((void *));
676 {
677 	struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
678 	struct channel_softc *wdc_cp = &cp->wdc_channel;
679 	const char *intrstr;
680 	pci_intr_handle_t intrhandle;
681 
682 	cp->compat = 0;
683 
684 	if (sc->sc_pci_ih == NULL) {
685 		if (pci_intr_map(pa, &intrhandle) != 0) {
686 			printf("%s: couldn't map native-PCI interrupt\n",
687 			    sc->sc_wdcdev.sc_dev.dv_xname);
688 			return 0;
689 		}
690 		intrstr = pci_intr_string(pa->pa_pc, intrhandle);
691 #ifdef __OpenBSD__
692 		sc->sc_pci_ih = pci_intr_establish(pa->pa_pc,
693 		    intrhandle, IPL_BIO, pci_intr, sc,
694 		    sc->sc_wdcdev.sc_dev.dv_xname);
695 #else
696 		sc->sc_pci_ih = pci_intr_establish(pa->pa_pc,
697 		    intrhandle, IPL_BIO, pci_intr, sc);
698 #endif
699 		if (sc->sc_pci_ih != NULL) {
700 			printf("%s: using %s for native-PCI interrupt\n",
701 			    sc->sc_wdcdev.sc_dev.dv_xname,
702 			    intrstr ? intrstr : "unknown interrupt");
703 		} else {
704 			printf("%s: couldn't establish native-PCI interrupt",
705 			    sc->sc_wdcdev.sc_dev.dv_xname);
706 			if (intrstr != NULL)
707 				printf(" at %s", intrstr);
708 			printf("\n");
709 			return 0;
710 		}
711 	}
712 	cp->ih = sc->sc_pci_ih;
713 	if (pci_mapreg_map(pa, PCIIDE_REG_CMD_BASE(wdc_cp->channel),
714 	    PCI_MAPREG_TYPE_IO, 0,
715 	    &wdc_cp->cmd_iot, &wdc_cp->cmd_ioh, NULL, cmdsizep, 0) != 0) {
716 		printf("%s: couldn't map %s cmd regs\n",
717 		    sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
718 		return 0;
719 	}
720 
721 	if (pci_mapreg_map(pa, PCIIDE_REG_CTL_BASE(wdc_cp->channel),
722 	    PCI_MAPREG_TYPE_IO, 0,
723 	    &wdc_cp->ctl_iot, &cp->ctl_baseioh, NULL, ctlsizep, 0) != 0) {
724 		printf("%s: couldn't map %s ctl regs\n",
725 		    sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
726 		bus_space_unmap(wdc_cp->cmd_iot, wdc_cp->cmd_ioh, *cmdsizep);
727 		return 0;
728 	}
729 	/*
730 	 * In native mode, 4 bytes of I/O space are mapped for the control
731 	 * register, the control register is at offset 2. Pass the generic
732 	 * code a handle for only one byte at the right offset.
733 	 */
734 	if (bus_space_subregion(wdc_cp->ctl_iot, cp->ctl_baseioh, 2, 1,
735 	    &wdc_cp->ctl_ioh) != 0) {
736 		printf("%s: unable to subregion %s channel ctl regs\n",
737 		    sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
738 		bus_space_unmap(wdc_cp->cmd_iot, wdc_cp->cmd_ioh, *cmdsizep);
739 		bus_space_unmap(wdc_cp->cmd_iot, cp->ctl_baseioh, *ctlsizep);
740 		return 0;
741 	}
742 	return (1);
743 }
744 
745 void
746 pciide_mapreg_dma(sc, pa)
747 	struct pciide_softc *sc;
748 	struct pci_attach_args *pa;
749 {
750 	pcireg_t maptype;
751 	bus_addr_t addr;
752 
753 	/*
754 	 * Map DMA registers
755 	 *
756 	 * Note that sc_dma_ok is the right variable to test to see if
757 	 * DMA can be done.  If the interface doesn't support DMA,
758 	 * sc_dma_ok will never be non-zero.  If the DMA regs couldn't
759 	 * be mapped, it'll be zero.  I.e., sc_dma_ok will only be
760 	 * non-zero if the interface supports DMA and the registers
761 	 * could be mapped.
762 	 *
763 	 * XXX Note that despite the fact that the Bus Master IDE specs
764 	 * XXX say that "The bus master IDE function uses 16 bytes of IO
765 	 * XXX space," some controllers (at least the United
766 	 * XXX Microelectronics UM8886BF) place it in memory space.
767 	 */
768 
769 	maptype = pci_mapreg_type(pa->pa_pc, pa->pa_tag,
770 	    PCIIDE_REG_BUS_MASTER_DMA);
771 
772 	switch (maptype) {
773 	case PCI_MAPREG_TYPE_IO:
774 		sc->sc_dma_ok = (pci_mapreg_info(pa->pa_pc, pa->pa_tag,
775 		    PCIIDE_REG_BUS_MASTER_DMA, PCI_MAPREG_TYPE_IO,
776 		    &addr, NULL, NULL) == 0);
777 		if (sc->sc_dma_ok == 0) {
778 			printf(", unused (couldn't query registers)");
779 			break;
780 		}
781 		if ((sc->sc_pp->ide_flags & IDE_16BIT_IOSPACE)
782 		    && addr >= 0x10000) {
783 			sc->sc_dma_ok = 0;
784 			printf(", unused (registers at unsafe address %#lx)", addr);
785 			break;
786 		}
787 		/* FALLTHROUGH */
788 
789 	case PCI_MAPREG_MEM_TYPE_32BIT:
790 		sc->sc_dma_ok = (pci_mapreg_map(pa,
791 		    PCIIDE_REG_BUS_MASTER_DMA, maptype, 0,
792 		    &sc->sc_dma_iot, &sc->sc_dma_ioh, NULL, NULL, 0) == 0);
793 		sc->sc_dmat = pa->pa_dmat;
794 		if (sc->sc_dma_ok == 0) {
795 			printf(", unused (couldn't map registers)");
796 		} else {
797 			sc->sc_wdcdev.dma_arg = sc;
798 			sc->sc_wdcdev.dma_init = pciide_dma_init;
799 			sc->sc_wdcdev.dma_start = pciide_dma_start;
800 			sc->sc_wdcdev.dma_finish = pciide_dma_finish;
801 		}
802 		break;
803 
804 	default:
805 		sc->sc_dma_ok = 0;
806 		printf(", (unsupported maptype 0x%x)", maptype);
807 		break;
808 	}
809 }
810 
811 int
812 pciide_intr_flag(struct pciide_channel *cp)
813 {
814 	struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
815 
816 	if (cp->dma_in_progress) {
817 		int retry = 10;
818 		int status;
819 
820 		/* Check the status register */
821 		for (retry = 10; retry > 0; retry--) {
822 			status = bus_space_read_1(sc->sc_dma_iot,
823 			    sc->sc_dma_ioh,
824 			    IDEDMA_CTL + IDEDMA_SCH_OFFSET *
825 			    cp->wdc_channel.channel);
826 			if (status & IDEDMA_CTL_INTR) {
827 				break;
828 			}
829 			DELAY(5);
830 		}
831 
832 		/* Not for us.  */
833 		if (retry == 0)
834 			return (0);
835 
836 		return (1);
837 	}
838 
839 	return (-1);
840 }
841 
842 int
843 pciide_compat_intr(arg)
844 	void *arg;
845 {
846 	struct pciide_channel *cp = arg;
847 
848 	if (pciide_intr_flag(cp) == 0)
849 		return 0;
850 
851 #ifdef DIAGNOSTIC
852 	/* should only be called for a compat channel */
853 	if (cp->compat == 0)
854 		panic("pciide compat intr called for non-compat chan %p\n", cp);
855 #endif
856 	return (wdcintr(&cp->wdc_channel));
857 }
858 
859 int
860 pciide_pci_intr(arg)
861 	void *arg;
862 {
863 	struct pciide_softc *sc = arg;
864 	struct pciide_channel *cp;
865 	struct channel_softc *wdc_cp;
866 	int i, rv, crv;
867 
868 	rv = 0;
869 	for (i = 0; i < sc->sc_wdcdev.nchannels; i++) {
870 		cp = &sc->pciide_channels[i];
871 		wdc_cp = &cp->wdc_channel;
872 
873 		/* If a compat channel skip. */
874 		if (cp->compat)
875 			continue;
876 		/* if this channel not waiting for intr, skip */
877 		if ((wdc_cp->ch_flags & WDCF_IRQ_WAIT) == 0)
878 			continue;
879 
880 		if (pciide_intr_flag(cp) == 0)
881 			continue;
882 
883 		crv = wdcintr(wdc_cp);
884 		if (crv == 0)
885 			;		/* leave rv alone */
886 		else if (crv == 1)
887 			rv = 1;		/* claim the intr */
888 		else if (rv == 0)	/* crv should be -1 in this case */
889 			rv = crv;	/* if we've done no better, take it */
890 	}
891 	return (rv);
892 }
893 
894 void
895 pciide_channel_dma_setup(cp)
896 	struct pciide_channel *cp;
897 {
898 	int drive;
899 	struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
900 	struct ata_drive_datas *drvp;
901 
902 	for (drive = 0; drive < 2; drive++) {
903 		drvp = &cp->wdc_channel.ch_drive[drive];
904 		/* If no drive, skip */
905 		if ((drvp->drive_flags & DRIVE) == 0)
906 			continue;
907 		/* setup DMA if needed */
908 		if (((drvp->drive_flags & DRIVE_DMA) == 0 &&
909 		    (drvp->drive_flags & DRIVE_UDMA) == 0) ||
910 		    sc->sc_dma_ok == 0) {
911 			drvp->drive_flags &= ~(DRIVE_DMA | DRIVE_UDMA);
912 			continue;
913 		}
914 		if (pciide_dma_table_setup(sc, cp->wdc_channel.channel, drive)
915 		    != 0) {
916 			/* Abort DMA setup */
917 			drvp->drive_flags &= ~(DRIVE_DMA | DRIVE_UDMA);
918 			continue;
919 		}
920 	}
921 }
922 
923 int
924 pciide_dma_table_setup(sc, channel, drive)
925 	struct pciide_softc *sc;
926 	int channel, drive;
927 {
928 	bus_dma_segment_t seg;
929 	int error, rseg;
930 	const bus_size_t dma_table_size =
931 	    sizeof(struct idedma_table) * NIDEDMA_TABLES;
932 	struct pciide_dma_maps *dma_maps =
933 	    &sc->pciide_channels[channel].dma_maps[drive];
934 
935 	/* If table was already allocated, just return */
936 	if (dma_maps->dma_table)
937 		return 0;
938 
939 	/* Allocate memory for the DMA tables and map it */
940 	if ((error = bus_dmamem_alloc(sc->sc_dmat, dma_table_size,
941 	    IDEDMA_TBL_ALIGN, IDEDMA_TBL_ALIGN, &seg, 1, &rseg,
942 	    BUS_DMA_NOWAIT)) != 0) {
943 		printf("%s:%d: unable to allocate table DMA for "
944 		    "drive %d, error=%d\n", sc->sc_wdcdev.sc_dev.dv_xname,
945 		    channel, drive, error);
946 		return error;
947 	}
948 
949 	if ((error = bus_dmamem_map(sc->sc_dmat, &seg, rseg,
950 	    dma_table_size,
951 	    (caddr_t *)&dma_maps->dma_table,
952 	    BUS_DMA_NOWAIT|BUS_DMA_COHERENT)) != 0) {
953 		printf("%s:%d: unable to map table DMA for"
954 		    "drive %d, error=%d\n", sc->sc_wdcdev.sc_dev.dv_xname,
955 		    channel, drive, error);
956 		return error;
957 	}
958 
959 	WDCDEBUG_PRINT(("pciide_dma_table_setup: table at %p len %ld, "
960 	    "phy 0x%lx\n", dma_maps->dma_table, dma_table_size,
961 	    seg.ds_addr), DEBUG_PROBE);
962 
963 	/* Create and load table DMA map for this disk */
964 	if ((error = bus_dmamap_create(sc->sc_dmat, dma_table_size,
965 	    1, dma_table_size, IDEDMA_TBL_ALIGN, BUS_DMA_NOWAIT,
966 	    &dma_maps->dmamap_table)) != 0) {
967 		printf("%s:%d: unable to create table DMA map for "
968 		    "drive %d, error=%d\n", sc->sc_wdcdev.sc_dev.dv_xname,
969 		    channel, drive, error);
970 		return error;
971 	}
972 	if ((error = bus_dmamap_load(sc->sc_dmat,
973 	    dma_maps->dmamap_table,
974 	    dma_maps->dma_table,
975 	    dma_table_size, NULL, BUS_DMA_NOWAIT)) != 0) {
976 		printf("%s:%d: unable to load table DMA map for "
977 		    "drive %d, error=%d\n", sc->sc_wdcdev.sc_dev.dv_xname,
978 		    channel, drive, error);
979 		return error;
980 	}
981 	WDCDEBUG_PRINT(("pciide_dma_table_setup: phy addr of table 0x%lx\n",
982 	    dma_maps->dmamap_table->dm_segs[0].ds_addr), DEBUG_PROBE);
983 	/* Create a xfer DMA map for this drive */
984 	if ((error = bus_dmamap_create(sc->sc_dmat, IDEDMA_BYTE_COUNT_MAX,
985 	    NIDEDMA_TABLES, IDEDMA_BYTE_COUNT_MAX, IDEDMA_BYTE_COUNT_ALIGN,
986 	    BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW,
987 	    &dma_maps->dmamap_xfer)) != 0) {
988 		printf("%s:%d: unable to create xfer DMA map for "
989 		    "drive %d, error=%d\n", sc->sc_wdcdev.sc_dev.dv_xname,
990 		    channel, drive, error);
991 		return error;
992 	}
993 	return 0;
994 }
995 
996 int
997 pciide_dma_init(v, channel, drive, databuf, datalen, flags)
998 	void *v;
999 	int channel, drive;
1000 	void *databuf;
1001 	size_t datalen;
1002 	int flags;
1003 {
1004 	struct pciide_softc *sc = v;
1005 	int error, seg;
1006 	struct pciide_dma_maps *dma_maps =
1007 	    &sc->pciide_channels[channel].dma_maps[drive];
1008 #ifndef BUS_DMA_RAW
1009 #define BUS_DMA_RAW 0
1010 #endif
1011 
1012 	error = bus_dmamap_load(sc->sc_dmat,
1013 	    dma_maps->dmamap_xfer,
1014 	    databuf, datalen, NULL, BUS_DMA_NOWAIT|BUS_DMA_RAW);
1015 	if (error) {
1016 		printf("%s:%d: unable to load xfer DMA map for"
1017 		    "drive %d, error=%d\n", sc->sc_wdcdev.sc_dev.dv_xname,
1018 		    channel, drive, error);
1019 		return error;
1020 	}
1021 
1022 #ifdef __HAVE_NEW_BUS_DMAMAP_SYNC
1023 	bus_dmamap_sync(sc->sc_dmat, dma_maps->dmamap_xfer, 0,
1024 	    dma_maps->dmamap_xfer->dm_mapsize,
1025 	    (flags & WDC_DMA_READ) ?
1026 	    BUS_DMASYNC_PREREAD : BUS_DMASYNC_PREWRITE);
1027 #else
1028 	bus_dmamap_sync(sc->sc_dmat, dma_maps->dmamap_xfer,
1029 	    (flags & WDC_DMA_READ) ?
1030 	    BUS_DMASYNC_PREREAD : BUS_DMASYNC_PREWRITE);
1031 #endif
1032 
1033 	for (seg = 0; seg < dma_maps->dmamap_xfer->dm_nsegs; seg++) {
1034 #ifdef DIAGNOSTIC
1035 		/* A segment must not cross a 64k boundary */
1036 		{
1037 		u_long phys = dma_maps->dmamap_xfer->dm_segs[seg].ds_addr;
1038 		u_long len = dma_maps->dmamap_xfer->dm_segs[seg].ds_len;
1039 		if ((phys & ~IDEDMA_BYTE_COUNT_MASK) !=
1040 		    ((phys + len - 1) & ~IDEDMA_BYTE_COUNT_MASK)) {
1041 			printf("pciide_dma: segment %d physical addr 0x%lx"
1042 			    " len 0x%lx not properly aligned\n",
1043 			    seg, phys, len);
1044 			panic("pciide_dma: buf align");
1045 		}
1046 		}
1047 #endif
1048 		dma_maps->dma_table[seg].base_addr =
1049 		    htole32(dma_maps->dmamap_xfer->dm_segs[seg].ds_addr);
1050 		dma_maps->dma_table[seg].byte_count =
1051 		    htole32(dma_maps->dmamap_xfer->dm_segs[seg].ds_len &
1052 		    IDEDMA_BYTE_COUNT_MASK);
1053 		WDCDEBUG_PRINT(("\t seg %d len %d addr 0x%x\n",
1054 		   seg, letoh32(dma_maps->dma_table[seg].byte_count),
1055 		   letoh32(dma_maps->dma_table[seg].base_addr)), DEBUG_DMA);
1056 
1057 	}
1058 	dma_maps->dma_table[dma_maps->dmamap_xfer->dm_nsegs -1].byte_count |=
1059 	    htole32(IDEDMA_BYTE_COUNT_EOT);
1060 
1061 #ifdef __HAVE_NEW_BUS_DMAMAP_SYNC
1062 	bus_dmamap_sync(sc->sc_dmat, dma_maps->dmamap_table, 0,
1063 	    dma_maps->dmamap_table->dm_mapsize,
1064 	    BUS_DMASYNC_PREWRITE);
1065 #else
1066 	bus_dmamap_sync(sc->sc_dmat, dma_maps->dmamap_table,
1067 	    BUS_DMASYNC_PREWRITE);
1068 #endif
1069 
1070 	/* Maps are ready. Start DMA function */
1071 #ifdef DIAGNOSTIC
1072 	if (dma_maps->dmamap_table->dm_segs[0].ds_addr & ~IDEDMA_TBL_MASK) {
1073 		printf("pciide_dma_init: addr 0x%lx not properly aligned\n",
1074 		    dma_maps->dmamap_table->dm_segs[0].ds_addr);
1075 		panic("pciide_dma_init: table align");
1076 	}
1077 #endif
1078 
1079 	/* Clear status bits */
1080 	bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
1081 	    IDEDMA_CTL + IDEDMA_SCH_OFFSET * channel,
1082 	    bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh,
1083 		IDEDMA_CTL + IDEDMA_SCH_OFFSET * channel));
1084 	/* Write table addr */
1085 	bus_space_write_4(sc->sc_dma_iot, sc->sc_dma_ioh,
1086 	    IDEDMA_TBL + IDEDMA_SCH_OFFSET * channel,
1087 	    dma_maps->dmamap_table->dm_segs[0].ds_addr);
1088 	/* set read/write */
1089 	bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
1090 	    IDEDMA_CMD + IDEDMA_SCH_OFFSET * channel,
1091 	    (flags & WDC_DMA_READ) ? IDEDMA_CMD_WRITE: 0);
1092 	/* remember flags */
1093 	dma_maps->dma_flags = flags;
1094 	return 0;
1095 }
1096 
1097 void
1098 pciide_dma_start(v, channel, drive)
1099 	void *v;
1100 	int channel, drive;
1101 {
1102 	struct pciide_softc *sc = v;
1103 
1104 	WDCDEBUG_PRINT(("pciide_dma_start\n"),DEBUG_XFERS);
1105 	bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
1106 	    IDEDMA_CMD + IDEDMA_SCH_OFFSET * channel,
1107 	    bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh,
1108 		IDEDMA_CMD + IDEDMA_SCH_OFFSET * channel) | IDEDMA_CMD_START);
1109 
1110 	sc->pciide_channels[channel].dma_in_progress = 1;
1111 }
1112 
1113 int
1114 pciide_dma_finish(v, channel, drive)
1115 	void *v;
1116 	int channel, drive;
1117 {
1118 	struct pciide_softc *sc = v;
1119 	u_int8_t status;
1120 	int error = 0;
1121 	struct pciide_dma_maps *dma_maps =
1122 	    &sc->pciide_channels[channel].dma_maps[drive];
1123 
1124 	sc->pciide_channels[channel].dma_in_progress = 0;
1125 
1126 	status = bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh,
1127 	    IDEDMA_CTL + IDEDMA_SCH_OFFSET * channel);
1128 	WDCDEBUG_PRINT(("pciide_dma_finish: status 0x%x\n", status),
1129 	    DEBUG_XFERS);
1130 
1131 	/* stop DMA channel */
1132 	bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
1133 	    IDEDMA_CMD + IDEDMA_SCH_OFFSET * channel,
1134 	    (dma_maps->dma_flags & WDC_DMA_READ) ?
1135 	    0x00 : IDEDMA_CMD_WRITE);
1136 
1137 	/* Unload the map of the data buffer */
1138 #ifdef __HAVE_NEW_BUS_DMAMAP_SYNC
1139 	bus_dmamap_sync(sc->sc_dmat, dma_maps->dmamap_xfer, 0,
1140 	    dma_maps->dmamap_xfer->dm_mapsize,
1141 	    (dma_maps->dma_flags & WDC_DMA_READ) ?
1142 	    BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE);
1143 #else
1144 	bus_dmamap_sync(sc->sc_dmat, dma_maps->dmamap_xfer,
1145 	    (dma_maps->dma_flags & WDC_DMA_READ) ?
1146 	    BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE);
1147 #endif
1148 	bus_dmamap_unload(sc->sc_dmat, dma_maps->dmamap_xfer);
1149 
1150 	/* Clear status bits */
1151 	bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
1152 	    IDEDMA_CTL + IDEDMA_SCH_OFFSET * channel,
1153 	    status);
1154 
1155 	if ((status & IDEDMA_CTL_ERR) != 0) {
1156 		printf("%s:%d:%d: bus-master DMA error: status=0x%x\n",
1157 		    sc->sc_wdcdev.sc_dev.dv_xname, channel, drive, status);
1158 		error |= WDC_DMAST_ERR;
1159 	}
1160 
1161 	if ((status & IDEDMA_CTL_INTR) == 0) {
1162 		printf("%s:%d:%d: bus-master DMA error: missing interrupt, "
1163 		    "status=0x%x\n", sc->sc_wdcdev.sc_dev.dv_xname, channel,
1164 		    drive, status);
1165 		error |= WDC_DMAST_NOIRQ;
1166 	}
1167 
1168 	if ((status & IDEDMA_CTL_ACT) != 0) {
1169 		/* data underrun, may be a valid condition for ATAPI */
1170 		error |= WDC_DMAST_UNDER;
1171 	}
1172 	return error;
1173 }
1174 
1175 void
1176 pciide_irqack(chp)
1177         struct channel_softc *chp;
1178 {
1179         struct pciide_channel *cp = (struct pciide_channel*)chp;
1180         struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
1181 
1182         /* clear status bits in IDE DMA registers */
1183         bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
1184             IDEDMA_CTL + IDEDMA_SCH_OFFSET * chp->channel,
1185             bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh,
1186                 IDEDMA_CTL + IDEDMA_SCH_OFFSET * chp->channel));
1187 }
1188 
1189 /* some common code used by several chip_map */
1190 int
1191 pciide_chansetup(sc, channel, interface)
1192 	struct pciide_softc *sc;
1193 	int channel;
1194 	pcireg_t interface;
1195 {
1196 	struct pciide_channel *cp = &sc->pciide_channels[channel];
1197 	sc->wdc_chanarray[channel] = &cp->wdc_channel;
1198 	cp->name = PCIIDE_CHANNEL_NAME(channel);
1199 	cp->wdc_channel.channel = channel;
1200 	cp->wdc_channel.wdc = &sc->sc_wdcdev;
1201 	cp->wdc_channel.ch_queue =
1202 	    malloc(sizeof(struct channel_queue), M_DEVBUF, M_NOWAIT);
1203 	if (cp->wdc_channel.ch_queue == NULL) {
1204 		printf("%s: %s "
1205 			"cannot allocate memory for command queue",
1206 		sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
1207 		return 0;
1208 	}
1209 	cp->hw_ok = 1;
1210 
1211 	return 1;
1212 }
1213 
1214 /* some common code used by several chip channel_map */
1215 void
1216 pciide_mapchan(pa, cp, interface, cmdsizep, ctlsizep, pci_intr)
1217 	struct pci_attach_args *pa;
1218 	struct pciide_channel *cp;
1219 	pcireg_t interface;
1220 	bus_size_t *cmdsizep, *ctlsizep;
1221 	int (*pci_intr) __P((void *));
1222 {
1223 	struct channel_softc *wdc_cp = &cp->wdc_channel;
1224 
1225 	if (interface & PCIIDE_INTERFACE_PCI(wdc_cp->channel))
1226 		cp->hw_ok = pciide_mapregs_native(pa, cp, cmdsizep, ctlsizep,
1227 		    pci_intr);
1228 	else
1229 		cp->hw_ok = pciide_mapregs_compat(pa, cp,
1230 		    wdc_cp->channel, cmdsizep, ctlsizep);
1231 	if (cp->hw_ok == 0)
1232 		return;
1233 	wdc_cp->data32iot = wdc_cp->cmd_iot;
1234 	wdc_cp->data32ioh = wdc_cp->cmd_ioh;
1235 	wdcattach(wdc_cp);
1236 }
1237 
1238 /*
1239  * Generic code to call to know if a channel can be disabled. Return 1
1240  * if channel can be disabled, 0 if not
1241  */
1242 int
1243 pciide_chan_candisable(cp)
1244 	struct pciide_channel *cp;
1245 {
1246 	struct channel_softc *wdc_cp = &cp->wdc_channel;
1247 
1248 	if ((wdc_cp->ch_drive[0].drive_flags & DRIVE) == 0 &&
1249 	    (wdc_cp->ch_drive[1].drive_flags & DRIVE) == 0) {
1250 		cp->hw_ok = 0;
1251 		return 1;
1252 	}
1253 	return 0;
1254 }
1255 
1256 /*
1257  * generic code to map the compat intr if hw_ok=1 and it is a compat channel.
1258  * Set hw_ok=0 on failure
1259  */
1260 void
1261 pciide_map_compat_intr(pa, cp, compatchan, interface)
1262 	struct pci_attach_args *pa;
1263 	struct pciide_channel *cp;
1264 	int compatchan, interface;
1265 {
1266 	struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
1267 	struct channel_softc *wdc_cp = &cp->wdc_channel;
1268 
1269 	if ((interface & PCIIDE_INTERFACE_PCI(wdc_cp->channel)) != 0)
1270 		return;
1271 
1272 	cp->compat = 1;
1273 	cp->ih = pciide_machdep_compat_intr_establish(&sc->sc_wdcdev.sc_dev,
1274 	    pa, compatchan, pciide_compat_intr, cp);
1275 	if (cp->ih == NULL) {
1276 		printf("%s: no compatibility interrupt for use by %s\n",
1277 		    sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
1278 		cp->hw_ok = 0;
1279 	}
1280 }
1281 
1282 /*
1283  * generic code to map the compat intr if hw_ok=1 and it is a compat channel.
1284  * Set hw_ok=0 on failure
1285  */
1286 void
1287 pciide_unmap_compat_intr(pa, cp, compatchan, interface)
1288 	struct pci_attach_args *pa;
1289 	struct pciide_channel *cp;
1290 	int compatchan, interface;
1291 {
1292 	struct channel_softc *wdc_cp = &cp->wdc_channel;
1293 
1294 	if ((interface & PCIIDE_INTERFACE_PCI(wdc_cp->channel)) != 0)
1295 		return;
1296 
1297 	pciide_machdep_compat_intr_disestablish(pa->pa_pc, cp->ih);
1298 }
1299 
1300 void
1301 pciide_print_channels(nchannels, interface)
1302 	int nchannels;
1303 	pcireg_t interface;
1304 {
1305 	int i;
1306 
1307 	for (i = 0; i < nchannels; i++) {
1308 		printf(", %s %s to %s", PCIIDE_CHANNEL_NAME(i),
1309 		    (interface & PCIIDE_INTERFACE_SETTABLE(i)) ?
1310 		    "configured" : "wired",
1311 		    (interface & PCIIDE_INTERFACE_PCI(i)) ? "native-PCI" :
1312 		    "compatibility");
1313 		    }
1314 
1315 	printf("\n");
1316 }
1317 
1318 void
1319 pciide_print_modes(cp)
1320 	struct pciide_channel *cp;
1321 {
1322 	wdc_print_current_modes(&cp->wdc_channel);
1323 }
1324 
1325 void
1326 default_chip_map(sc, pa)
1327 	struct pciide_softc *sc;
1328 	struct pci_attach_args *pa;
1329 {
1330 	struct pciide_channel *cp;
1331 	pcireg_t interface = PCI_INTERFACE(pa->pa_class);
1332 	pcireg_t csr;
1333 	int channel, drive;
1334 	struct ata_drive_datas *drvp;
1335 	u_int8_t idedma_ctl;
1336 	bus_size_t cmdsize, ctlsize;
1337 	char *failreason;
1338 
1339 	if (pciide_chipen(sc, pa) == 0)
1340 		return;
1341 
1342 	if (interface & PCIIDE_INTERFACE_BUS_MASTER_DMA) {
1343 		printf(": DMA");
1344 		if (sc->sc_pp == &default_product_desc &&
1345 		    (sc->sc_wdcdev.sc_dev.dv_cfdata->cf_flags &
1346 		    PCIIDE_OPTIONS_DMA) == 0) {
1347 			printf(" (unsupported)");
1348 			sc->sc_dma_ok = 0;
1349 		} else {
1350 			pciide_mapreg_dma(sc, pa);
1351 		if (sc->sc_dma_ok != 0)
1352 			printf(", (partial support)");
1353 		}
1354 	} else {
1355 		printf(": no DMA");
1356 		sc->sc_dma_ok = 0;
1357 	}
1358 	if (sc->sc_dma_ok) {
1359 		sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_IRQACK;
1360 		sc->sc_wdcdev.irqack = pciide_irqack;
1361 	}
1362 	sc->sc_wdcdev.PIO_cap = 0;
1363 	sc->sc_wdcdev.DMA_cap = 0;
1364 	sc->sc_wdcdev.channels = sc->wdc_chanarray;
1365 	sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
1366 	sc->sc_wdcdev.cap |= WDC_CAPABILITY_DATA16;
1367 
1368 	pciide_print_channels(sc->sc_wdcdev.nchannels, interface);
1369 
1370 	for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
1371 		cp = &sc->pciide_channels[channel];
1372 		if (pciide_chansetup(sc, channel, interface) == 0)
1373 		    continue;
1374 		if (interface & PCIIDE_INTERFACE_PCI(channel)) {
1375 			cp->hw_ok = pciide_mapregs_native(pa, cp, &cmdsize,
1376 			    &ctlsize, pciide_pci_intr);
1377 		} else {
1378 			cp->hw_ok = pciide_mapregs_compat(pa, cp,
1379 			    channel, &cmdsize, &ctlsize);
1380 		}
1381 		if (cp->hw_ok == 0)
1382 			continue;
1383 		/*
1384 		 * Check to see if something appears to be there.
1385 		 */
1386 		failreason = NULL;
1387 		pciide_map_compat_intr(pa, cp, channel, interface);
1388 		if (cp->hw_ok == 0)
1389 			continue;
1390 		if (!wdcprobe(&cp->wdc_channel)) {
1391 			failreason = "not responding; disabled or no drives?";
1392 			goto next;
1393 		}
1394 		/*
1395 		 * Now, make sure it's actually attributable to this PCI IDE
1396 		 * channel by trying to access the channel again while the
1397 		 * PCI IDE controller's I/O space is disabled.  (If the
1398 		 * channel no longer appears to be there, it belongs to
1399 		 * this controller.)  YUCK!
1400 		 */
1401 		csr = pci_conf_read(sc->sc_pc, sc->sc_tag,
1402 	  	    PCI_COMMAND_STATUS_REG);
1403 		pci_conf_write(sc->sc_pc, sc->sc_tag, PCI_COMMAND_STATUS_REG,
1404 		    csr & ~PCI_COMMAND_IO_ENABLE);
1405 		if (wdcprobe(&cp->wdc_channel))
1406 			failreason = "other hardware responding at addresses";
1407 		pci_conf_write(sc->sc_pc, sc->sc_tag,
1408 		    PCI_COMMAND_STATUS_REG, csr);
1409 next:
1410 		if (failreason) {
1411 			printf("%s: %s ignored (%s)\n",
1412 			    sc->sc_wdcdev.sc_dev.dv_xname, cp->name,
1413 			    failreason);
1414 			cp->hw_ok = 0;
1415 			pciide_unmap_compat_intr(pa, cp, channel, interface);
1416 			bus_space_unmap(cp->wdc_channel.cmd_iot,
1417 			    cp->wdc_channel.cmd_ioh, cmdsize);
1418 			bus_space_unmap(cp->wdc_channel.ctl_iot,
1419 			    cp->wdc_channel.ctl_ioh, ctlsize);
1420 		}
1421 		if (cp->hw_ok) {
1422 			cp->wdc_channel.data32iot = cp->wdc_channel.cmd_iot;
1423 			cp->wdc_channel.data32ioh = cp->wdc_channel.cmd_ioh;
1424 			wdcattach(&cp->wdc_channel);
1425 		}
1426 	}
1427 
1428 	if (sc->sc_dma_ok == 0)
1429 		return;
1430 
1431 	/* Allocate DMA maps */
1432 	for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
1433 		idedma_ctl = 0;
1434 		cp = &sc->pciide_channels[channel];
1435 		for (drive = 0; drive < 2; drive++) {
1436 			drvp = &cp->wdc_channel.ch_drive[drive];
1437 			/* If no drive, skip */
1438 			if ((drvp->drive_flags & DRIVE) == 0)
1439 				continue;
1440 			if ((drvp->drive_flags & DRIVE_DMA) == 0)
1441 				continue;
1442 			if (pciide_dma_table_setup(sc, channel, drive) != 0) {
1443 				/* Abort DMA setup */
1444 				printf("%s:%d:%d: cannot allocate DMA maps, "
1445 				    "using PIO transfers\n",
1446 				    sc->sc_wdcdev.sc_dev.dv_xname,
1447 				    channel, drive);
1448 				drvp->drive_flags &= ~DRIVE_DMA;
1449 			}
1450 			printf("%s:%d:%d: using DMA data transfers\n",
1451 			    sc->sc_wdcdev.sc_dev.dv_xname,
1452 			    channel, drive);
1453 			idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
1454 		}
1455 		if (idedma_ctl != 0) {
1456 			/* Add software bits in status register */
1457 			bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
1458 			    IDEDMA_CTL + (IDEDMA_SCH_OFFSET * channel),
1459 			    idedma_ctl);
1460 		}
1461 	}
1462 }
1463 
1464 void
1465 piix_chip_map(sc, pa)
1466 	struct pciide_softc *sc;
1467 	struct pci_attach_args *pa;
1468 {
1469 	struct pciide_channel *cp;
1470 	int channel;
1471 	u_int32_t idetim;
1472 	bus_size_t cmdsize, ctlsize;
1473 
1474 	pcireg_t interface = PCI_INTERFACE(pa->pa_class);
1475 
1476 	if (pciide_chipen(sc, pa) == 0)
1477 		return;
1478 
1479 	printf(": DMA");
1480 	pciide_mapreg_dma(sc, pa);
1481 	sc->sc_wdcdev.cap |= WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
1482 	    WDC_CAPABILITY_MODE;
1483 	if (sc->sc_dma_ok) {
1484 		sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_IRQACK;
1485 		sc->sc_wdcdev.irqack = pciide_irqack;
1486 		switch (sc->sc_pp->ide_product) {
1487 		case PCI_PRODUCT_INTEL_82371AB_IDE:
1488 		case PCI_PRODUCT_INTEL_82440MX_IDE:
1489 		case PCI_PRODUCT_INTEL_82801AA_IDE:
1490 		case PCI_PRODUCT_INTEL_82801AB_IDE:
1491 		case PCI_PRODUCT_INTEL_82801BAM_IDE:
1492 		case PCI_PRODUCT_INTEL_82801BA_IDE:
1493 			sc->sc_wdcdev.cap |= WDC_CAPABILITY_UDMA;
1494 			break;
1495 		}
1496 	}
1497 	sc->sc_wdcdev.PIO_cap = 4;
1498 	sc->sc_wdcdev.DMA_cap = 2;
1499 	switch (sc->sc_pp->ide_product) {
1500 	case PCI_PRODUCT_INTEL_82801AA_IDE:
1501 		sc->sc_wdcdev.UDMA_cap = 4;
1502 		break;
1503 	case PCI_PRODUCT_INTEL_82801BAM_IDE:
1504 	case PCI_PRODUCT_INTEL_82801BA_IDE:
1505 		sc->sc_wdcdev.UDMA_cap = 5;
1506 		break;
1507 	default:
1508 		sc->sc_wdcdev.UDMA_cap = 2;
1509 		break;
1510 	}
1511 	if (sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82371FB_IDE)
1512 		sc->sc_wdcdev.set_modes = piix_setup_channel;
1513 	else
1514 		sc->sc_wdcdev.set_modes = piix3_4_setup_channel;
1515 	sc->sc_wdcdev.channels = sc->wdc_chanarray;
1516 	sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
1517 
1518 	pciide_print_channels(sc->sc_wdcdev.nchannels, interface);
1519 
1520 	WDCDEBUG_PRINT(("piix_setup_chip: old idetim=0x%x",
1521 	    pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_IDETIM)),
1522 	    DEBUG_PROBE);
1523 	if (sc->sc_pp->ide_product != PCI_PRODUCT_INTEL_82371FB_IDE) {
1524 		WDCDEBUG_PRINT((", sidetim=0x%x",
1525 		    pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_SIDETIM)),
1526 		    DEBUG_PROBE);
1527 		if (sc->sc_wdcdev.cap & WDC_CAPABILITY_UDMA) {
1528 			WDCDEBUG_PRINT((", udamreg 0x%x",
1529 			    pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_UDMAREG)),
1530 			    DEBUG_PROBE);
1531 		}
1532 		if (sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801AA_IDE ||
1533 		    sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801AB_IDE ||
1534 		    sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801BAM_IDE ||
1535 		    sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801BA_IDE ) {
1536 			WDCDEBUG_PRINT((", IDE_CONTROL 0x%x",
1537 			    pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_CONFIG)),
1538 			    DEBUG_PROBE);
1539 		}
1540 
1541 	}
1542 	WDCDEBUG_PRINT(("\n"), DEBUG_PROBE);
1543 
1544 	for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
1545 		cp = &sc->pciide_channels[channel];
1546 		/* PIIX is compat-only */
1547 		if (pciide_chansetup(sc, channel, 0) == 0)
1548 			continue;
1549 		idetim = pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_IDETIM);
1550 		if ((PIIX_IDETIM_READ(idetim, channel) &
1551 		    PIIX_IDETIM_IDE) == 0) {
1552 			printf("%s: %s ignored (disabled)\n",
1553 			    sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
1554 			continue;
1555 		}
1556 		/* PIIX are compat-only pciide devices */
1557 		pciide_map_compat_intr(pa, cp, channel, 0);
1558 		if (cp->hw_ok == 0)
1559 			continue;
1560 		pciide_mapchan(pa, cp, 0, &cmdsize, &ctlsize, pciide_pci_intr);
1561 		if (cp->hw_ok == 0)
1562 			goto next;
1563 		if (pciide_chan_candisable(cp)) {
1564 			idetim = PIIX_IDETIM_CLEAR(idetim, PIIX_IDETIM_IDE,
1565 			    channel);
1566 			pci_conf_write(sc->sc_pc, sc->sc_tag, PIIX_IDETIM,
1567 			    idetim);
1568 		}
1569 		if (cp->hw_ok == 0)
1570 			goto next;
1571 		sc->sc_wdcdev.set_modes(&cp->wdc_channel);
1572 next:
1573 		if (cp->hw_ok == 0)
1574 			pciide_unmap_compat_intr(pa, cp, channel, 0);
1575 	}
1576 
1577 	WDCDEBUG_PRINT(("piix_setup_chip: idetim=0x%x",
1578 	    pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_IDETIM)),
1579 	    DEBUG_PROBE);
1580 	if (sc->sc_pp->ide_product != PCI_PRODUCT_INTEL_82371FB_IDE) {
1581 		WDCDEBUG_PRINT((", sidetim=0x%x",
1582 		    pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_SIDETIM)),
1583 		    DEBUG_PROBE);
1584 		if (sc->sc_wdcdev.cap & WDC_CAPABILITY_UDMA) {
1585 			WDCDEBUG_PRINT((", udamreg 0x%x",
1586 			    pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_UDMAREG)),
1587 			        DEBUG_PROBE);
1588 		}
1589 		if (sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801AA_IDE ||
1590 		    sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801AB_IDE ||
1591 		    sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801BAM_IDE ||
1592 		    sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801BA_IDE ) {
1593 			WDCDEBUG_PRINT((", IDE_CONTROL 0x%x",
1594 			    pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_CONFIG)),
1595 			DEBUG_PROBE);
1596 		}
1597 	}
1598 	WDCDEBUG_PRINT(("\n"), DEBUG_PROBE);
1599 }
1600 
1601 void
1602 piix_setup_channel(chp)
1603 	struct channel_softc *chp;
1604 {
1605 	u_int8_t mode[2], drive;
1606 	u_int32_t oidetim, idetim, idedma_ctl;
1607 	struct pciide_channel *cp = (struct pciide_channel*)chp;
1608 	struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
1609 	struct ata_drive_datas *drvp = cp->wdc_channel.ch_drive;
1610 
1611 	oidetim = pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_IDETIM);
1612 	idetim = PIIX_IDETIM_CLEAR(oidetim, 0xffff, chp->channel);
1613 	idedma_ctl = 0;
1614 
1615 	/* set up new idetim: Enable IDE registers decode */
1616 	idetim = PIIX_IDETIM_SET(idetim, PIIX_IDETIM_IDE,
1617 	    chp->channel);
1618 
1619 	/* setup DMA */
1620 	pciide_channel_dma_setup(cp);
1621 
1622 	/*
1623 	 * Here we have to mess up with drives mode: PIIX can't have
1624 	 * different timings for master and slave drives.
1625 	 * We need to find the best combination.
1626 	 */
1627 
1628 	/* If both drives supports DMA, take the lower mode */
1629 	if ((drvp[0].drive_flags & DRIVE_DMA) &&
1630 	    (drvp[1].drive_flags & DRIVE_DMA)) {
1631 		mode[0] = mode[1] =
1632 		    min(drvp[0].DMA_mode, drvp[1].DMA_mode);
1633 		    drvp[0].DMA_mode = mode[0];
1634 		    drvp[1].DMA_mode = mode[1];
1635 		goto ok;
1636 	}
1637 	/*
1638 	 * If only one drive supports DMA, use its mode, and
1639 	 * put the other one in PIO mode 0 if mode not compatible
1640 	 */
1641 	if (drvp[0].drive_flags & DRIVE_DMA) {
1642 		mode[0] = drvp[0].DMA_mode;
1643 		mode[1] = drvp[1].PIO_mode;
1644 		if (piix_isp_pio[mode[1]] != piix_isp_dma[mode[0]] ||
1645 		    piix_rtc_pio[mode[1]] != piix_rtc_dma[mode[0]])
1646 			mode[1] = drvp[1].PIO_mode = 0;
1647 		goto ok;
1648 	}
1649 	if (drvp[1].drive_flags & DRIVE_DMA) {
1650 		mode[1] = drvp[1].DMA_mode;
1651 		mode[0] = drvp[0].PIO_mode;
1652 		if (piix_isp_pio[mode[0]] != piix_isp_dma[mode[1]] ||
1653 		    piix_rtc_pio[mode[0]] != piix_rtc_dma[mode[1]])
1654 			mode[0] = drvp[0].PIO_mode = 0;
1655 		goto ok;
1656 	}
1657 	/*
1658 	 * If both drives are not DMA, takes the lower mode, unless
1659 	 * one of them is PIO mode < 2
1660 	 */
1661 	if (drvp[0].PIO_mode < 2) {
1662 		mode[0] = drvp[0].PIO_mode = 0;
1663 		mode[1] = drvp[1].PIO_mode;
1664 	} else if (drvp[1].PIO_mode < 2) {
1665 		mode[1] = drvp[1].PIO_mode = 0;
1666 		mode[0] = drvp[0].PIO_mode;
1667 	} else {
1668 		mode[0] = mode[1] =
1669 		    min(drvp[1].PIO_mode, drvp[0].PIO_mode);
1670 		drvp[0].PIO_mode = mode[0];
1671 		drvp[1].PIO_mode = mode[1];
1672 	}
1673 ok:	/* The modes are setup */
1674 	for (drive = 0; drive < 2; drive++) {
1675 		if (drvp[drive].drive_flags & DRIVE_DMA) {
1676 			idetim |= piix_setup_idetim_timings(
1677 			    mode[drive], 1, chp->channel);
1678 			goto end;
1679 		}
1680 	}
1681 	/* If we are there, none of the drives are DMA */
1682 	if (mode[0] >= 2)
1683 		idetim |= piix_setup_idetim_timings(
1684 		    mode[0], 0, chp->channel);
1685 	else
1686 		idetim |= piix_setup_idetim_timings(
1687 		    mode[1], 0, chp->channel);
1688 end:	/*
1689 	 * timing mode is now set up in the controller. Enable
1690 	 * it per-drive
1691 	 */
1692 	for (drive = 0; drive < 2; drive++) {
1693 		/* If no drive, skip */
1694 		if ((drvp[drive].drive_flags & DRIVE) == 0)
1695 			continue;
1696 		idetim |= piix_setup_idetim_drvs(&drvp[drive]);
1697 		if (drvp[drive].drive_flags & DRIVE_DMA)
1698 			idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
1699 	}
1700 	if (idedma_ctl != 0) {
1701 		/* Add software bits in status register */
1702 		bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
1703 		    IDEDMA_CTL + (IDEDMA_SCH_OFFSET * chp->channel),
1704 		    idedma_ctl);
1705 	}
1706 	pci_conf_write(sc->sc_pc, sc->sc_tag, PIIX_IDETIM, idetim);
1707 	pciide_print_modes(cp);
1708 }
1709 
1710 void
1711 piix3_4_setup_channel(chp)
1712 	struct channel_softc *chp;
1713 {
1714 	struct ata_drive_datas *drvp;
1715 	u_int32_t oidetim, idetim, sidetim, udmareg, ideconf, idedma_ctl;
1716 	struct pciide_channel *cp = (struct pciide_channel*)chp;
1717 	struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
1718 	int drive;
1719 	int channel = chp->channel;
1720 
1721 	oidetim = pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_IDETIM);
1722 	sidetim = pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_SIDETIM);
1723 	udmareg = pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_UDMAREG);
1724 	ideconf = pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_CONFIG);
1725 	idetim = PIIX_IDETIM_CLEAR(oidetim, 0xffff, channel);
1726 	sidetim &= ~(PIIX_SIDETIM_ISP_MASK(channel) |
1727 	    PIIX_SIDETIM_RTC_MASK(channel));
1728 
1729 	idedma_ctl = 0;
1730 	/* If channel disabled, no need to go further */
1731 	if ((PIIX_IDETIM_READ(oidetim, channel) & PIIX_IDETIM_IDE) == 0)
1732 		return;
1733 	/* set up new idetim: Enable IDE registers decode */
1734 	idetim = PIIX_IDETIM_SET(idetim, PIIX_IDETIM_IDE, channel);
1735 
1736 	/* setup DMA if needed */
1737 	pciide_channel_dma_setup(cp);
1738 
1739 	for (drive = 0; drive < 2; drive++) {
1740 		udmareg &= ~(PIIX_UDMACTL_DRV_EN(channel, drive) |
1741 		    PIIX_UDMATIM_SET(0x3, channel, drive));
1742 		drvp = &chp->ch_drive[drive];
1743 		/* If no drive, skip */
1744 		if ((drvp->drive_flags & DRIVE) == 0)
1745 			continue;
1746 		if (((drvp->drive_flags & DRIVE_DMA) == 0 &&
1747 		    (drvp->drive_flags & DRIVE_UDMA) == 0))
1748 			goto pio;
1749 
1750 		if (sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801AA_IDE ||
1751 		    sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801AB_IDE ||
1752 		    sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801BAM_IDE ||
1753 		    sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801BA_IDE ) {
1754 		    ideconf |= PIIX_CONFIG_PINGPONG;
1755 		}
1756 		if (sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801BAM_IDE ||
1757 		    sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801BA_IDE) {
1758 			/* setup Ultra/100 */
1759 			if (drvp->UDMA_mode > 2 &&
1760 			    (ideconf & PIIX_CONFIG_CR(channel, drive)) == 0)
1761 				drvp->UDMA_mode = 2;
1762 			if (drvp->UDMA_mode > 4) {
1763 				ideconf |= PIIX_CONFIG_UDMA100(channel, drive);
1764 			} else {
1765 				ideconf &= ~PIIX_CONFIG_UDMA100(channel, drive);
1766 				if (drvp->UDMA_mode > 2) {
1767 					ideconf |= PIIX_CONFIG_UDMA66(channel,
1768 					    drive);
1769 				} else {
1770 					ideconf &= ~PIIX_CONFIG_UDMA66(channel,
1771 					    drive);
1772 				}
1773 			}
1774 		}
1775 		if (sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801AA_IDE) {
1776 			/* setup Ultra/66 */
1777 			if (drvp->UDMA_mode > 2 &&
1778 			    (ideconf & PIIX_CONFIG_CR(channel, drive)) == 0)
1779 				drvp->UDMA_mode = 2;
1780 			if (drvp->UDMA_mode > 2)
1781 				ideconf |= PIIX_CONFIG_UDMA66(channel, drive);
1782 			else
1783 				ideconf &= ~PIIX_CONFIG_UDMA66(channel, drive);
1784 		}
1785 
1786 		if ((chp->wdc->cap & WDC_CAPABILITY_UDMA) &&
1787 		    (drvp->drive_flags & DRIVE_UDMA)) {
1788 			/* use Ultra/DMA */
1789 			drvp->drive_flags &= ~DRIVE_DMA;
1790 			udmareg |= PIIX_UDMACTL_DRV_EN( channel,drive);
1791 			udmareg |= PIIX_UDMATIM_SET(
1792 			    piix4_sct_udma[drvp->UDMA_mode], channel, drive);
1793 		} else {
1794 			/* use Multiword DMA */
1795 			drvp->drive_flags &= ~DRIVE_UDMA;
1796 			if (drive == 0) {
1797 				idetim |= piix_setup_idetim_timings(
1798 				    drvp->DMA_mode, 1, channel);
1799 			} else {
1800 				sidetim |= piix_setup_sidetim_timings(
1801 					drvp->DMA_mode, 1, channel);
1802 				idetim =PIIX_IDETIM_SET(idetim,
1803 				    PIIX_IDETIM_SITRE, channel);
1804 			}
1805 		}
1806 		idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
1807 
1808 pio:		/* use PIO mode */
1809 		idetim |= piix_setup_idetim_drvs(drvp);
1810 		if (drive == 0) {
1811 			idetim |= piix_setup_idetim_timings(
1812 			    drvp->PIO_mode, 0, channel);
1813 		} else {
1814 			sidetim |= piix_setup_sidetim_timings(
1815 				drvp->PIO_mode, 0, channel);
1816 			idetim =PIIX_IDETIM_SET(idetim,
1817 			    PIIX_IDETIM_SITRE, channel);
1818 		}
1819 	}
1820 	if (idedma_ctl != 0) {
1821 		/* Add software bits in status register */
1822 		bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
1823 		    IDEDMA_CTL + (IDEDMA_SCH_OFFSET * channel),
1824 		    idedma_ctl);
1825 	}
1826 	pci_conf_write(sc->sc_pc, sc->sc_tag, PIIX_IDETIM, idetim);
1827 	pci_conf_write(sc->sc_pc, sc->sc_tag, PIIX_SIDETIM, sidetim);
1828 	pci_conf_write(sc->sc_pc, sc->sc_tag, PIIX_UDMAREG, udmareg);
1829 	pci_conf_write(sc->sc_pc, sc->sc_tag, PIIX_CONFIG, ideconf);
1830 	pciide_print_modes(cp);
1831 }
1832 
1833 
1834 /* setup ISP and RTC fields, based on mode */
1835 static u_int32_t
1836 piix_setup_idetim_timings(mode, dma, channel)
1837 	u_int8_t mode;
1838 	u_int8_t dma;
1839 	u_int8_t channel;
1840 {
1841 
1842 	if (dma)
1843 		return PIIX_IDETIM_SET(0,
1844 		    PIIX_IDETIM_ISP_SET(piix_isp_dma[mode]) |
1845 		    PIIX_IDETIM_RTC_SET(piix_rtc_dma[mode]),
1846 		    channel);
1847 	else
1848 		return PIIX_IDETIM_SET(0,
1849 		    PIIX_IDETIM_ISP_SET(piix_isp_pio[mode]) |
1850 		    PIIX_IDETIM_RTC_SET(piix_rtc_pio[mode]),
1851 		    channel);
1852 }
1853 
1854 /* setup DTE, PPE, IE and TIME field based on PIO mode */
1855 static u_int32_t
1856 piix_setup_idetim_drvs(drvp)
1857 	struct ata_drive_datas *drvp;
1858 {
1859 	u_int32_t ret = 0;
1860 	struct channel_softc *chp = drvp->chnl_softc;
1861 	u_int8_t channel = chp->channel;
1862 	u_int8_t drive = drvp->drive;
1863 
1864 	/*
1865 	 * If drive is using UDMA, timings setups are independant
1866 	 * So just check DMA and PIO here.
1867 	 */
1868 	if (drvp->drive_flags & DRIVE_DMA) {
1869 		/* if mode = DMA mode 0, use compatible timings */
1870 		if ((drvp->drive_flags & DRIVE_DMA) &&
1871 		    drvp->DMA_mode == 0) {
1872 			drvp->PIO_mode = 0;
1873 			return ret;
1874 		}
1875 		ret = PIIX_IDETIM_SET(ret, PIIX_IDETIM_TIME(drive), channel);
1876 		/*
1877 		 * PIO and DMA timings are the same, use fast timings for PIO
1878 		 * too, else use compat timings.
1879 		 */
1880 		if ((piix_isp_pio[drvp->PIO_mode] !=
1881 		    piix_isp_dma[drvp->DMA_mode]) ||
1882 		    (piix_rtc_pio[drvp->PIO_mode] !=
1883 		    piix_rtc_dma[drvp->DMA_mode]))
1884 			drvp->PIO_mode = 0;
1885 		/* if PIO mode <= 2, use compat timings for PIO */
1886 		if (drvp->PIO_mode <= 2) {
1887 			ret = PIIX_IDETIM_SET(ret, PIIX_IDETIM_DTE(drive),
1888 			    channel);
1889 			return ret;
1890 		}
1891 	}
1892 
1893 	/*
1894 	 * Now setup PIO modes. If mode < 2, use compat timings.
1895 	 * Else enable fast timings. Enable IORDY and prefetch/post
1896 	 * if PIO mode >= 3.
1897 	 */
1898 
1899 	if (drvp->PIO_mode < 2)
1900 		return ret;
1901 
1902 	ret = PIIX_IDETIM_SET(ret, PIIX_IDETIM_TIME(drive), channel);
1903 	if (drvp->PIO_mode >= 3) {
1904 		ret = PIIX_IDETIM_SET(ret, PIIX_IDETIM_IE(drive), channel);
1905 		ret = PIIX_IDETIM_SET(ret, PIIX_IDETIM_PPE(drive), channel);
1906 	}
1907 	return ret;
1908 }
1909 
1910 /* setup values in SIDETIM registers, based on mode */
1911 static u_int32_t
1912 piix_setup_sidetim_timings(mode, dma, channel)
1913 	u_int8_t mode;
1914 	u_int8_t dma;
1915 	u_int8_t channel;
1916 {
1917 	if (dma)
1918 		return PIIX_SIDETIM_ISP_SET(piix_isp_dma[mode], channel) |
1919 		    PIIX_SIDETIM_RTC_SET(piix_rtc_dma[mode], channel);
1920 	else
1921 		return PIIX_SIDETIM_ISP_SET(piix_isp_pio[mode], channel) |
1922 		    PIIX_SIDETIM_RTC_SET(piix_rtc_pio[mode], channel);
1923 }
1924 
1925 void
1926 amd756_chip_map(sc, pa)
1927 	struct pciide_softc *sc;
1928 	struct pci_attach_args *pa;
1929 {
1930 	struct pciide_channel *cp;
1931 	pcireg_t interface = PCI_INTERFACE(pa->pa_class);
1932 	int channel;
1933 	pcireg_t chanenable;
1934 	bus_size_t cmdsize, ctlsize;
1935 
1936 	if (pciide_chipen(sc, pa) == 0)
1937 		return;
1938 
1939 	printf(": DMA");
1940 	pciide_mapreg_dma(sc, pa);
1941 	sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
1942 	    WDC_CAPABILITY_MODE;
1943 	if (sc->sc_dma_ok) {
1944 		sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_UDMA;
1945                 sc->sc_wdcdev.cap |= WDC_CAPABILITY_IRQACK;
1946                 sc->sc_wdcdev.irqack = pciide_irqack;
1947 	}
1948 	sc->sc_wdcdev.PIO_cap = 4;
1949 	sc->sc_wdcdev.DMA_cap = 2;
1950 	switch (sc->sc_pp->ide_product) {
1951 	case PCI_PRODUCT_AMD_766_IDE:
1952 		sc->sc_wdcdev.UDMA_cap = 5;
1953 		break;
1954 	default:
1955 		sc->sc_wdcdev.UDMA_cap = 4;
1956 		break;
1957 	}
1958 	sc->sc_wdcdev.set_modes = amd756_setup_channel;
1959 	sc->sc_wdcdev.channels = sc->wdc_chanarray;
1960 	sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
1961 	chanenable = pci_conf_read(sc->sc_pc, sc->sc_tag, AMD756_CHANSTATUS_EN);
1962 
1963 	pciide_print_channels(sc->sc_wdcdev.nchannels, interface);
1964 
1965 	for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
1966 		cp = &sc->pciide_channels[channel];
1967 		if (pciide_chansetup(sc, channel, interface) == 0)
1968 			continue;
1969 
1970 		if ((chanenable & AMD756_CHAN_EN(channel)) == 0) {
1971 			printf("%s: %s ignored (disabled)\n",
1972 			    sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
1973 			continue;
1974 		}
1975 		pciide_map_compat_intr(pa, cp, channel, interface);
1976 		if (cp->hw_ok == 0)
1977 			continue;
1978 
1979 		pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize,
1980 		    pciide_pci_intr);
1981 
1982 		if (pciide_chan_candisable(cp)) {
1983 			chanenable &= ~AMD756_CHAN_EN(channel);
1984 		}
1985 		if (cp->hw_ok == 0) {
1986 			pciide_unmap_compat_intr(pa, cp, channel, interface);
1987 			continue;
1988 		}
1989 
1990 		amd756_setup_channel(&cp->wdc_channel);
1991 	}
1992 	pci_conf_write(sc->sc_pc, sc->sc_tag, AMD756_CHANSTATUS_EN,
1993 	    chanenable);
1994 	return;
1995 }
1996 
1997 void
1998 amd756_setup_channel(chp)
1999 	struct channel_softc *chp;
2000 {
2001 	u_int32_t udmatim_reg, datatim_reg;
2002 	u_int8_t idedma_ctl;
2003 	int mode, drive;
2004 	struct ata_drive_datas *drvp;
2005 	struct pciide_channel *cp = (struct pciide_channel*)chp;
2006 	struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
2007 #ifndef	PCIIDE_AMD756_ENABLEDMA
2008 	int product = PCI_PRODUCT(
2009 	    pci_conf_read(sc->sc_pc, sc->sc_tag, PCI_ID_REG));
2010 	int rev = PCI_REVISION(
2011 	    pci_conf_read(sc->sc_pc, sc->sc_tag, PCI_CLASS_REG));
2012 #endif
2013 
2014 	idedma_ctl = 0;
2015 	datatim_reg = pci_conf_read(sc->sc_pc, sc->sc_tag, AMD756_DATATIM);
2016 	udmatim_reg = pci_conf_read(sc->sc_pc, sc->sc_tag, AMD756_UDMA);
2017 	datatim_reg &= ~AMD756_DATATIM_MASK(chp->channel);
2018 	udmatim_reg &= ~AMD756_UDMA_MASK(chp->channel);
2019 
2020 	/* setup DMA if needed */
2021 	pciide_channel_dma_setup(cp);
2022 
2023 	for (drive = 0; drive < 2; drive++) {
2024 		drvp = &chp->ch_drive[drive];
2025 		/* If no drive, skip */
2026 		if ((drvp->drive_flags & DRIVE) == 0)
2027 			continue;
2028 		/* add timing values, setup DMA if needed */
2029 		if (((drvp->drive_flags & DRIVE_DMA) == 0 &&
2030 		    (drvp->drive_flags & DRIVE_UDMA) == 0)) {
2031 			mode = drvp->PIO_mode;
2032 			goto pio;
2033 		}
2034 		if ((chp->wdc->cap & WDC_CAPABILITY_UDMA) &&
2035 		    (drvp->drive_flags & DRIVE_UDMA)) {
2036 			/* use Ultra/DMA */
2037 			drvp->drive_flags &= ~DRIVE_DMA;
2038 			udmatim_reg |= AMD756_UDMA_EN(chp->channel, drive) |
2039 			    AMD756_UDMA_EN_MTH(chp->channel, drive) |
2040 			    AMD756_UDMA_TIME(chp->channel, drive,
2041 				amd756_udma_tim[drvp->UDMA_mode]);
2042 			/* can use PIO timings, MW DMA unused */
2043 			mode = drvp->PIO_mode;
2044 		} else {
2045 			/* use Multiword DMA, but only if revision is OK */
2046 			drvp->drive_flags &= ~DRIVE_UDMA;
2047 #ifndef PCIIDE_AMD756_ENABLEDMA
2048 			/*
2049 			 * The workaround doesn't seem to be necessary
2050 			 * with all drives, so it can be disabled by
2051 			 * PCIIDE_AMD756_ENABLEDMA. It causes a hard hang if
2052 			 * triggered.
2053 			 */
2054 			if (AMD756_CHIPREV_DISABLEDMA(product, rev)) {
2055 				printf("%s:%d:%d: multi-word DMA disabled due "
2056 				    "to chip revision\n",
2057 				    sc->sc_wdcdev.sc_dev.dv_xname,
2058 				    chp->channel, drive);
2059 				mode = drvp->PIO_mode;
2060 				drvp->drive_flags &= ~DRIVE_DMA;
2061 				goto pio;
2062 			}
2063 #endif
2064 			/* mode = min(pio, dma+2) */
2065 			if (drvp->PIO_mode <= (drvp->DMA_mode +2))
2066 				mode = drvp->PIO_mode;
2067 			else
2068 				mode = drvp->DMA_mode + 2;
2069 		}
2070 		idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
2071 
2072 pio:		/* setup PIO mode */
2073 		if (mode <= 2) {
2074 			drvp->DMA_mode = 0;
2075 			drvp->PIO_mode = 0;
2076 			mode = 0;
2077 		} else {
2078 			drvp->PIO_mode = mode;
2079 			drvp->DMA_mode = mode - 2;
2080 		}
2081 		datatim_reg |=
2082 		    AMD756_DATATIM_PULSE(chp->channel, drive,
2083 			amd756_pio_set[mode]) |
2084 		    AMD756_DATATIM_RECOV(chp->channel, drive,
2085 			amd756_pio_rec[mode]);
2086 	}
2087 	if (idedma_ctl != 0) {
2088 		/* Add software bits in status register */
2089 		bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
2090 		    IDEDMA_CTL + (IDEDMA_SCH_OFFSET * chp->channel),
2091 		    idedma_ctl);
2092 	}
2093 	pciide_print_modes(cp);
2094 	pci_conf_write(sc->sc_pc, sc->sc_tag, AMD756_DATATIM, datatim_reg);
2095 	pci_conf_write(sc->sc_pc, sc->sc_tag, AMD756_UDMA, udmatim_reg);
2096 }
2097 
2098 void
2099 apollo_chip_map(sc, pa)
2100 	struct pciide_softc *sc;
2101 	struct pci_attach_args *pa;
2102 {
2103 	struct pciide_channel *cp;
2104 	pcireg_t interface = PCI_INTERFACE(pa->pa_class);
2105 	int channel;
2106 	u_int32_t ideconf;
2107 	bus_size_t cmdsize, ctlsize;
2108 	pcitag_t pcib_tag;
2109 	pcireg_t pcib_id, pcib_class;
2110 
2111 	if (pciide_chipen(sc, pa) == 0)
2112 		return;
2113 	pcib_tag = pci_make_tag(pa->pa_pc, pa->pa_bus, pa->pa_device, 0);
2114 
2115 	pcib_id = pci_conf_read(sc->sc_pc, pcib_tag, PCI_ID_REG);
2116 	pcib_class = pci_conf_read(sc->sc_pc, pcib_tag, PCI_CLASS_REG);
2117 
2118 	switch (PCI_PRODUCT(pcib_id)) {
2119 	case PCI_PRODUCT_VIATECH_VT82C586_ISA:
2120 		if (PCI_REVISION(pcib_class) >= 0x02) {
2121 			printf(": ATA33");
2122 			sc->sc_wdcdev.UDMA_cap = 2;
2123 		} else {
2124 			printf(": DMA");
2125 			sc->sc_wdcdev.UDMA_cap = 0;
2126 		}
2127 		break;
2128 	case PCI_PRODUCT_VIATECH_VT82C596A:
2129 		if (PCI_REVISION(pcib_class) >= 0x12) {
2130 			printf(": ATA66");
2131 			sc->sc_wdcdev.UDMA_cap = 4;
2132 		} else {
2133 			printf(": ATA33");
2134 			sc->sc_wdcdev.UDMA_cap = 2;
2135 		}
2136 		break;
2137 
2138 	case PCI_PRODUCT_VIATECH_VT82C686A_ISA:
2139 		if (PCI_REVISION(pcib_class) >= 0x40) {
2140 			printf(": ATA100");
2141 			sc->sc_wdcdev.UDMA_cap = 5;
2142 		} else {
2143 			printf(": ATA66");
2144 			sc->sc_wdcdev.UDMA_cap = 4;
2145 		}
2146 		break;
2147 	default:
2148 		printf(": DMA");
2149 		sc->sc_wdcdev.UDMA_cap = 0;
2150 		break;
2151 	}
2152 
2153 	pciide_mapreg_dma(sc, pa);
2154 	sc->sc_wdcdev.cap |= WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
2155 	    WDC_CAPABILITY_MODE;
2156 	if (sc->sc_dma_ok) {
2157 		sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_IRQACK;
2158 		sc->sc_wdcdev.irqack = pciide_irqack;
2159 		if (sc->sc_wdcdev.UDMA_cap > 0)
2160 			sc->sc_wdcdev.cap |= WDC_CAPABILITY_UDMA;
2161 	}
2162 	sc->sc_wdcdev.PIO_cap = 4;
2163 	sc->sc_wdcdev.DMA_cap = 2;
2164 	sc->sc_wdcdev.set_modes = apollo_setup_channel;
2165 	sc->sc_wdcdev.channels = sc->wdc_chanarray;
2166 	sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
2167 
2168 	pciide_print_channels(sc->sc_wdcdev.nchannels, interface);
2169 
2170 	WDCDEBUG_PRINT(("apollo_chip_map: old APO_IDECONF=0x%x, "
2171 	    "APO_CTLMISC=0x%x, APO_DATATIM=0x%x, APO_UDMA=0x%x\n",
2172 	    pci_conf_read(sc->sc_pc, sc->sc_tag, APO_IDECONF),
2173 	    pci_conf_read(sc->sc_pc, sc->sc_tag, APO_CTLMISC),
2174 	    pci_conf_read(sc->sc_pc, sc->sc_tag, APO_DATATIM),
2175 	    pci_conf_read(sc->sc_pc, sc->sc_tag, APO_UDMA)),
2176 	    DEBUG_PROBE);
2177 
2178 	for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
2179 		cp = &sc->pciide_channels[channel];
2180 		if (pciide_chansetup(sc, channel, interface) == 0)
2181 			continue;
2182 
2183 		ideconf = pci_conf_read(sc->sc_pc, sc->sc_tag, APO_IDECONF);
2184 		if ((ideconf & APO_IDECONF_EN(channel)) == 0) {
2185 			printf("%s: %s ignored (disabled)\n",
2186 			    sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
2187 			continue;
2188 		}
2189 		pciide_map_compat_intr(pa, cp, channel, interface);
2190 		if (cp->hw_ok == 0)
2191 			continue;
2192 
2193 		pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize,
2194 		    pciide_pci_intr);
2195 		if (cp->hw_ok == 0) {
2196 			goto next;
2197 		}
2198 		if (pciide_chan_candisable(cp)) {
2199 			ideconf &= ~APO_IDECONF_EN(channel);
2200 			pci_conf_write(sc->sc_pc, sc->sc_tag, APO_IDECONF,
2201 				    ideconf);
2202 		}
2203 
2204 		if (cp->hw_ok == 0)
2205 			goto next;
2206 		apollo_setup_channel(&sc->pciide_channels[channel].wdc_channel);
2207 next:
2208 		if (cp->hw_ok == 0)
2209 			pciide_unmap_compat_intr(pa, cp, channel, interface);
2210 	}
2211 	WDCDEBUG_PRINT(("apollo_chip_map: APO_DATATIM=0x%x, APO_UDMA=0x%x\n",
2212 	    pci_conf_read(sc->sc_pc, sc->sc_tag, APO_DATATIM),
2213 	    pci_conf_read(sc->sc_pc, sc->sc_tag, APO_UDMA)), DEBUG_PROBE);
2214 }
2215 
2216 void
2217 apollo_setup_channel(chp)
2218 	struct channel_softc *chp;
2219 {
2220 	u_int32_t udmatim_reg, datatim_reg;
2221 	u_int8_t idedma_ctl;
2222 	int mode, drive;
2223 	struct ata_drive_datas *drvp;
2224 	struct pciide_channel *cp = (struct pciide_channel*)chp;
2225 	struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
2226 
2227 	idedma_ctl = 0;
2228 	datatim_reg = pci_conf_read(sc->sc_pc, sc->sc_tag, APO_DATATIM);
2229 	udmatim_reg = pci_conf_read(sc->sc_pc, sc->sc_tag, APO_UDMA);
2230 	datatim_reg &= ~APO_DATATIM_MASK(chp->channel);
2231 	udmatim_reg &= ~APO_UDMA_MASK(chp->channel);
2232 
2233 	/* setup DMA if needed */
2234 	pciide_channel_dma_setup(cp);
2235 
2236 	/*
2237 	 * We can't mix Ultra/33 and Ultra/66 on the same channel, so
2238 	 * downgrade to Ultra/33 if needed
2239 	 */
2240 	if ((chp->ch_drive[0].drive_flags & DRIVE_UDMA) &&
2241 	    (chp->ch_drive[1].drive_flags & DRIVE_UDMA)) {
2242 		/* both drives UDMA */
2243 		if (chp->ch_drive[0].UDMA_mode > 2 &&
2244 		    chp->ch_drive[1].UDMA_mode <= 2) {
2245 			/* drive 0 Ultra/66, drive 1 Ultra/33 */
2246 			chp->ch_drive[0].UDMA_mode = 2;
2247 		} else if (chp->ch_drive[1].UDMA_mode > 2 &&
2248 		    chp->ch_drive[0].UDMA_mode <= 2) {
2249 			/* drive 1 Ultra/66, drive 0 Ultra/33 */
2250 			chp->ch_drive[1].UDMA_mode = 2;
2251 		}
2252 	}
2253 
2254 	for (drive = 0; drive < 2; drive++) {
2255 		drvp = &chp->ch_drive[drive];
2256 		/* If no drive, skip */
2257 		if ((drvp->drive_flags & DRIVE) == 0)
2258 			continue;
2259 		/* add timing values, setup DMA if needed */
2260 		if (((drvp->drive_flags & DRIVE_DMA) == 0 &&
2261 		    (drvp->drive_flags & DRIVE_UDMA) == 0)) {
2262 			mode = drvp->PIO_mode;
2263 			goto pio;
2264 		}
2265 		if ((chp->wdc->cap & WDC_CAPABILITY_UDMA) &&
2266 		    (drvp->drive_flags & DRIVE_UDMA)) {
2267 			/* use Ultra/DMA */
2268 			drvp->drive_flags &= ~DRIVE_DMA;
2269 			udmatim_reg |= APO_UDMA_EN(chp->channel, drive) |
2270 			    APO_UDMA_EN_MTH(chp->channel, drive);
2271 
2272 			if (sc->sc_wdcdev.UDMA_cap == 5) {
2273 				/* 686b */
2274 				udmatim_reg |= APO_UDMA_CLK66(chp->channel);
2275 				udmatim_reg |= APO_UDMA_TIME(chp->channel,
2276 				    drive, apollo_udma100_tim[drvp->UDMA_mode]);
2277 			} else if (sc->sc_wdcdev.UDMA_cap == 4) {
2278 				/* 596b or 686a */
2279 				udmatim_reg |= APO_UDMA_CLK66(chp->channel);
2280 				udmatim_reg |= APO_UDMA_TIME(chp->channel,
2281 				    drive, apollo_udma66_tim[drvp->UDMA_mode]);
2282 			} else {
2283 				/* 596a or 586b */
2284 				udmatim_reg |= APO_UDMA_TIME(chp->channel,
2285 				    drive, apollo_udma33_tim[drvp->UDMA_mode]);
2286 			}
2287 			/* can use PIO timings, MW DMA unused */
2288 			mode = drvp->PIO_mode;
2289 		} else {
2290 			/* use Multiword DMA */
2291 			drvp->drive_flags &= ~DRIVE_UDMA;
2292 			/* mode = min(pio, dma+2) */
2293 			if (drvp->PIO_mode <= (drvp->DMA_mode +2))
2294 				mode = drvp->PIO_mode;
2295 			else
2296 				mode = drvp->DMA_mode + 2;
2297 		}
2298 		idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
2299 
2300 pio:		/* setup PIO mode */
2301 		if (mode <= 2) {
2302 			drvp->DMA_mode = 0;
2303 			drvp->PIO_mode = 0;
2304 			mode = 0;
2305 		} else {
2306 			drvp->PIO_mode = mode;
2307 			drvp->DMA_mode = mode - 2;
2308 		}
2309 		datatim_reg |=
2310 		    APO_DATATIM_PULSE(chp->channel, drive,
2311 			apollo_pio_set[mode]) |
2312 		    APO_DATATIM_RECOV(chp->channel, drive,
2313 			apollo_pio_rec[mode]);
2314 	}
2315 	if (idedma_ctl != 0) {
2316 		/* Add software bits in status register */
2317 		bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
2318 		    IDEDMA_CTL + (IDEDMA_SCH_OFFSET * chp->channel),
2319 		    idedma_ctl);
2320 	}
2321 	pciide_print_modes(cp);
2322 	pci_conf_write(sc->sc_pc, sc->sc_tag, APO_DATATIM, datatim_reg);
2323 	pci_conf_write(sc->sc_pc, sc->sc_tag, APO_UDMA, udmatim_reg);
2324 }
2325 
2326 void
2327 cmd_channel_map(pa, sc, channel)
2328 	struct pci_attach_args *pa;
2329 	struct pciide_softc *sc;
2330 	int channel;
2331 {
2332 	struct pciide_channel *cp = &sc->pciide_channels[channel];
2333 	bus_size_t cmdsize, ctlsize;
2334 	u_int8_t ctrl = pciide_pci_read(sc->sc_pc, sc->sc_tag, CMD_CTRL);
2335 	pcireg_t interface;
2336 
2337 	/*
2338 	 * The 0648/0649 can be told to identify as a RAID controller.
2339 	 * In this case, we have to fake interface
2340 	 */
2341 	if (PCI_SUBCLASS(pa->pa_class) != PCI_SUBCLASS_MASS_STORAGE_IDE) {
2342 		interface = PCIIDE_INTERFACE_SETTABLE(0) |
2343 		    PCIIDE_INTERFACE_SETTABLE(1);
2344 		if (pciide_pci_read(pa->pa_pc, pa->pa_tag, CMD_CONF) &
2345 		    CMD_CONF_DSA1)
2346 			interface |= PCIIDE_INTERFACE_PCI(0) |
2347 			    PCIIDE_INTERFACE_PCI(1);
2348 	} else {
2349 		interface = PCI_INTERFACE(pa->pa_class);
2350 	}
2351 
2352 	sc->wdc_chanarray[channel] = &cp->wdc_channel;
2353 	cp->name = PCIIDE_CHANNEL_NAME(channel);
2354 	cp->wdc_channel.channel = channel;
2355 	cp->wdc_channel.wdc = &sc->sc_wdcdev;
2356 
2357 	if (channel > 0) {
2358 		cp->wdc_channel.ch_queue =
2359 		    sc->pciide_channels[0].wdc_channel.ch_queue;
2360 	} else {
2361 		cp->wdc_channel.ch_queue =
2362 		    malloc(sizeof(struct channel_queue), M_DEVBUF, M_NOWAIT);
2363 	}
2364 	if (cp->wdc_channel.ch_queue == NULL) {
2365 		printf(
2366 		    "%s: %s cannot allocate memory for command queue",
2367 		    sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
2368 		return;
2369 	}
2370 
2371 	/*
2372 	 * with a CMD PCI64x, if we get here, the first channel is enabled:
2373 	 * there's no way to disable the first channel without disabling
2374 	 * the whole device
2375 	 */
2376 	 if (channel != 0 && (ctrl & CMD_CTRL_2PORT) == 0) {
2377 		printf("%s: %s ignored (disabled)\n",
2378 		    sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
2379 		return;
2380 	}
2381 	cp->hw_ok = 1;
2382 	pciide_map_compat_intr(pa, cp, channel, interface);
2383 	if (cp->hw_ok == 0)
2384 		return;
2385 	pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize, cmd_pci_intr);
2386 	if (cp->hw_ok == 0) {
2387 		pciide_unmap_compat_intr(pa, cp, channel, interface);
2388 		return;
2389 	}
2390 	if (channel == 1) {
2391 		if (pciide_chan_candisable(cp)) {
2392 			ctrl &= ~CMD_CTRL_2PORT;
2393 			pciide_pci_write(pa->pa_pc, pa->pa_tag,
2394 			    CMD_CTRL, ctrl);
2395 			pciide_unmap_compat_intr(pa, cp, channel, interface);
2396 		}
2397 	}
2398 }
2399 
2400 int
2401 cmd_pci_intr(arg)
2402 	void *arg;
2403 {
2404 	struct pciide_softc *sc = arg;
2405 	struct pciide_channel *cp;
2406 	struct channel_softc *wdc_cp;
2407 	int i, rv, crv;
2408 	u_int32_t priirq, secirq;
2409 
2410 	rv = 0;
2411 	priirq = pciide_pci_read(sc->sc_pc, sc->sc_tag, CMD_CONF);
2412 	secirq = pciide_pci_read(sc->sc_pc, sc->sc_tag, CMD_ARTTIM23);
2413 	for (i = 0; i < sc->sc_wdcdev.nchannels; i++) {
2414 		cp = &sc->pciide_channels[i];
2415 		wdc_cp = &cp->wdc_channel;
2416 		/* If a compat channel skip. */
2417 		if (cp->compat)
2418 			continue;
2419 		if ((i == 0 && (priirq & CMD_CONF_DRV0_INTR)) ||
2420 		    (i == 1 && (secirq & CMD_ARTTIM23_IRQ))) {
2421 			crv = wdcintr(wdc_cp);
2422 			if (crv == 0)
2423 				printf("%s:%d: bogus intr\n",
2424 				    sc->sc_wdcdev.sc_dev.dv_xname, i);
2425 			else
2426 				rv = 1;
2427 		}
2428 	}
2429 	return rv;
2430 }
2431 
2432 void
2433 cmd_chip_map(sc, pa)
2434 	struct pciide_softc *sc;
2435 	struct pci_attach_args *pa;
2436 {
2437 	int channel;
2438 	pcireg_t interface = PCI_INTERFACE(pa->pa_class);
2439 	/*
2440  	 * For a CMD PCI064x, the use of PCI_COMMAND_IO_ENABLE
2441 	 * and base adresses registers can be disabled at
2442  	 * hardware level. In this case, the device is wired
2443 	 * in compat mode and its first channel is always enabled,
2444 	 * but we can't rely on PCI_COMMAND_IO_ENABLE.
2445 	 * In fact, it seems that the first channel of the CMD PCI0640
2446 	 * can't be disabled.
2447  	 */
2448 
2449 #ifdef PCIIDE_CMD064x_DISABLE
2450 	if (pciide_chipen(sc, pa) == 0)
2451 		return;
2452 #endif
2453 
2454 	printf(": no DMA");
2455 	sc->sc_dma_ok = 0;
2456 
2457 	sc->sc_wdcdev.channels = sc->wdc_chanarray;
2458 	sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
2459 	sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16;
2460 
2461 	pciide_print_channels(sc->sc_wdcdev.nchannels, interface);
2462 
2463 	for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
2464 		cmd_channel_map(pa, sc, channel);
2465 	}
2466 }
2467 
2468 void
2469 cmd0643_9_chip_map(sc, pa)
2470 	struct pciide_softc *sc;
2471 	struct pci_attach_args *pa;
2472 {
2473 	struct pciide_channel *cp;
2474 	int channel;
2475 	int rev = PCI_REVISION(
2476 	    pci_conf_read(sc->sc_pc, sc->sc_tag, PCI_CLASS_REG));
2477 	pcireg_t interface;
2478 
2479 	/*
2480 	 * The 0648/0649 can be told to identify as a RAID controller.
2481 	 * In this case, we have to fake interface
2482 	 */
2483 	if (PCI_SUBCLASS(pa->pa_class) != PCI_SUBCLASS_MASS_STORAGE_IDE) {
2484 		interface = PCIIDE_INTERFACE_SETTABLE(0) |
2485 		    PCIIDE_INTERFACE_SETTABLE(1);
2486 		if (pciide_pci_read(pa->pa_pc, pa->pa_tag, CMD_CONF) &
2487 		    CMD_CONF_DSA1)
2488 			interface |= PCIIDE_INTERFACE_PCI(0) |
2489 			    PCIIDE_INTERFACE_PCI(1);
2490 	} else {
2491 		interface = PCI_INTERFACE(pa->pa_class);
2492 	}
2493 
2494 	/*
2495 	 * For a CMD PCI064x, the use of PCI_COMMAND_IO_ENABLE
2496 	 * and base adresses registers can be disabled at
2497 	 * hardware level. In this case, the device is wired
2498 	 * in compat mode and its first channel is always enabled,
2499  	 * but we can't rely on PCI_COMMAND_IO_ENABLE.
2500 	 * In fact, it seems that the first channel of the CMD PCI0640
2501 	 * can't be disabled.
2502 	*/
2503 
2504 #ifdef PCIIDE_CMD064x_DISABLE
2505 	if (pciide_chipen(sc, pa) == 0)
2506 		return;
2507 #endif
2508 	printf(": DMA");
2509 	pciide_mapreg_dma(sc, pa);
2510 	sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
2511 	    WDC_CAPABILITY_MODE;
2512 	if (sc->sc_dma_ok) {
2513 		sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_IRQACK;
2514 		switch (sc->sc_pp->ide_product) {
2515 		case PCI_PRODUCT_CMDTECH_649:
2516 		case PCI_PRODUCT_CMDTECH_648:
2517 			sc->sc_wdcdev.cap |= WDC_CAPABILITY_UDMA;
2518 			sc->sc_wdcdev.UDMA_cap = 4;
2519 			sc->sc_wdcdev.irqack = cmd646_9_irqack;
2520 			break;
2521                 case PCI_PRODUCT_CMDTECH_646:
2522                         if (rev >= CMD0646U2_REV) {
2523                                 sc->sc_wdcdev.cap |= WDC_CAPABILITY_UDMA;
2524                                 sc->sc_wdcdev.UDMA_cap = 2;
2525                         } else if (rev >= CMD0646U_REV) {
2526                         /*
2527                          * Linux's driver claims that the 646U is broken
2528                          * with UDMA. Only enable it if we know what we're
2529                          * doing
2530                          */
2531 #ifdef PCIIDE_CMD0646U_ENABLEUDMA
2532                                 sc->sc_wdcdev.cap |= WDC_CAPABILITY_UDMA;
2533                                 sc->sc_wdcdev.UDMA_cap = 2;
2534 #endif
2535                                 /* explicitely disable UDMA */
2536                                 pciide_pci_write(sc->sc_pc, sc->sc_tag,
2537                                     CMD_UDMATIM(0), 0);
2538                                 pciide_pci_write(sc->sc_pc, sc->sc_tag,
2539                                     CMD_UDMATIM(1), 0);
2540                         }
2541                         sc->sc_wdcdev.irqack = cmd646_9_irqack;
2542                         break;
2543 		default:
2544 			sc->sc_wdcdev.irqack = pciide_irqack;
2545 		}
2546 	}
2547 
2548 	sc->sc_wdcdev.channels = sc->wdc_chanarray;
2549 	sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
2550 	sc->sc_wdcdev.PIO_cap = 4;
2551 	sc->sc_wdcdev.DMA_cap = 2;
2552 	sc->sc_wdcdev.set_modes = cmd0643_9_setup_channel;
2553 
2554 	pciide_print_channels(sc->sc_wdcdev.nchannels, interface);
2555 
2556 	WDCDEBUG_PRINT(("cmd0643_9_chip_map: old timings reg 0x%x 0x%x\n",
2557 		pci_conf_read(sc->sc_pc, sc->sc_tag, 0x54),
2558 		pci_conf_read(sc->sc_pc, sc->sc_tag, 0x58)),
2559 		DEBUG_PROBE);
2560 	for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
2561 		cp = &sc->pciide_channels[channel];
2562 		cmd_channel_map(pa, sc, channel);
2563 		if (cp->hw_ok == 0)
2564 			continue;
2565 		cmd0643_9_setup_channel(&cp->wdc_channel);
2566 	}
2567 	/*
2568 	 * note - this also makes sure we clear the irq disable and reset
2569 	 * bits
2570 	 */
2571 	pciide_pci_write(sc->sc_pc, sc->sc_tag, CMD_DMA_MODE, CMD_DMA_MULTIPLE);
2572 	WDCDEBUG_PRINT(("cmd0643_9_chip_map: timings reg now 0x%x 0x%x\n",
2573 	    pci_conf_read(sc->sc_pc, sc->sc_tag, 0x54),
2574 	    pci_conf_read(sc->sc_pc, sc->sc_tag, 0x58)),
2575 	    DEBUG_PROBE);
2576 }
2577 
2578 void
2579 cmd0643_9_setup_channel(chp)
2580 	struct channel_softc *chp;
2581 {
2582 	struct ata_drive_datas *drvp;
2583 	u_int8_t tim;
2584 	u_int32_t idedma_ctl, udma_reg;
2585 	int drive;
2586 	struct pciide_channel *cp = (struct pciide_channel*)chp;
2587 	struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
2588 
2589 	idedma_ctl = 0;
2590 	/* setup DMA if needed */
2591 	pciide_channel_dma_setup(cp);
2592 
2593 	for (drive = 0; drive < 2; drive++) {
2594 		drvp = &chp->ch_drive[drive];
2595 		/* If no drive, skip */
2596 		if ((drvp->drive_flags & DRIVE) == 0)
2597 			continue;
2598 		/* add timing values, setup DMA if needed */
2599 		tim = cmd0643_9_data_tim_pio[drvp->PIO_mode];
2600 		if (drvp->drive_flags & (DRIVE_DMA | DRIVE_UDMA)) {
2601 			if (drvp->drive_flags & DRIVE_UDMA) {
2602 				/* UltraDMA on a 646U2, 0648 or 0649 */
2603 				drvp->drive_flags &= ~DRIVE_DMA;
2604 				udma_reg = pciide_pci_read(sc->sc_pc,
2605 				    sc->sc_tag, CMD_UDMATIM(chp->channel));
2606 				if (drvp->UDMA_mode > 2 &&
2607 				    (pciide_pci_read(sc->sc_pc, sc->sc_tag,
2608 				    CMD_BICSR) &
2609 				    CMD_BICSR_80(chp->channel)) == 0)
2610 					drvp->UDMA_mode = 2;
2611 				if (drvp->UDMA_mode > 2)
2612 					udma_reg &= ~CMD_UDMATIM_UDMA33(drive);
2613 				else if (sc->sc_wdcdev.UDMA_cap > 2)
2614 					udma_reg |= CMD_UDMATIM_UDMA33(drive);
2615 				udma_reg |= CMD_UDMATIM_UDMA(drive);
2616 				udma_reg &= ~(CMD_UDMATIM_TIM_MASK <<
2617 				    CMD_UDMATIM_TIM_OFF(drive));
2618 				udma_reg |=
2619 				    (cmd0646_9_tim_udma[drvp->UDMA_mode] <<
2620 				    CMD_UDMATIM_TIM_OFF(drive));
2621 				pciide_pci_write(sc->sc_pc, sc->sc_tag,
2622 				    CMD_UDMATIM(chp->channel), udma_reg);
2623 			} else {
2624 				/*
2625 				 * use Multiword DMA.
2626 				 * Timings will be used for both PIO and DMA,
2627 				 * so adjust DMA mode if needed
2628 				 * if we have a 0646U2/8/9, turn off UDMA
2629 				 */
2630 				if (sc->sc_wdcdev.cap & WDC_CAPABILITY_UDMA) {
2631 					udma_reg = pciide_pci_read(sc->sc_pc,
2632 					    sc->sc_tag,
2633 					    CMD_UDMATIM(chp->channel));
2634 					udma_reg &= ~CMD_UDMATIM_UDMA(drive);
2635 					pciide_pci_write(sc->sc_pc, sc->sc_tag,
2636 					    CMD_UDMATIM(chp->channel),
2637 					    udma_reg);
2638 				}
2639 				if (drvp->PIO_mode >= 3 &&
2640 				    (drvp->DMA_mode + 2) > drvp->PIO_mode) {
2641 					drvp->DMA_mode = drvp->PIO_mode - 2;
2642 				}
2643 				tim = cmd0643_9_data_tim_dma[drvp->DMA_mode];
2644 			}
2645 			idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
2646 		}
2647 		pciide_pci_write(sc->sc_pc, sc->sc_tag,
2648 		    CMD_DATA_TIM(chp->channel, drive), tim);
2649 	}
2650 	if (idedma_ctl != 0) {
2651 		/* Add software bits in status register */
2652 		bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
2653 		    IDEDMA_CTL + (IDEDMA_SCH_OFFSET * chp->channel),
2654 		    idedma_ctl);
2655 	}
2656 	pciide_print_modes(cp);
2657 }
2658 
2659 void
2660 cmd646_9_irqack(chp)
2661         struct channel_softc *chp;
2662 {
2663         u_int32_t priirq, secirq;
2664         struct pciide_channel *cp = (struct pciide_channel*)chp;
2665         struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
2666 
2667         if (chp->channel == 0) {
2668                 priirq = pciide_pci_read(sc->sc_pc, sc->sc_tag, CMD_CONF);
2669                 pciide_pci_write(sc->sc_pc, sc->sc_tag, CMD_CONF, priirq);
2670         } else {
2671                 secirq = pciide_pci_read(sc->sc_pc, sc->sc_tag, CMD_ARTTIM23);
2672                 pciide_pci_write(sc->sc_pc, sc->sc_tag, CMD_ARTTIM23, secirq);
2673         }
2674         pciide_irqack(chp);
2675 }
2676 
2677 void
2678 cy693_chip_map(sc, pa)
2679 	struct pciide_softc *sc;
2680 	struct pci_attach_args *pa;
2681 {
2682 	struct pciide_channel *cp;
2683 	pcireg_t interface = PCI_INTERFACE(pa->pa_class);
2684 	bus_size_t cmdsize, ctlsize;
2685 
2686 	if (pciide_chipen(sc, pa) == 0)
2687 		return;
2688 	/*
2689 	 * this chip has 2 PCI IDE functions, one for primary and one for
2690 	 * secondary. So we need to call pciide_mapregs_compat() with
2691 	 * the real channel
2692 	 */
2693 	if (pa->pa_function == 1) {
2694 		sc->sc_cy_compatchan = 0;
2695 	} else if (pa->pa_function == 2) {
2696 		sc->sc_cy_compatchan = 1;
2697 	} else {
2698 		printf(": unexpected PCI function %d\n", pa->pa_function);
2699 		return;
2700 	}
2701 
2702 	if (interface & PCIIDE_INTERFACE_BUS_MASTER_DMA) {
2703 		printf(": DMA");
2704 		pciide_mapreg_dma(sc, pa);
2705 	} else {
2706 		printf(": no DMA");
2707 		sc->sc_dma_ok = 0;
2708 	}
2709 
2710 	sc->sc_cy_handle = cy82c693_init(pa->pa_iot);
2711 	if (sc->sc_cy_handle == NULL) {
2712 		printf(", (unable to map ctl registers)");
2713 		sc->sc_dma_ok = 0;
2714 	}
2715 
2716 	sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
2717 	    WDC_CAPABILITY_MODE;
2718 	if (sc->sc_dma_ok) {
2719 		sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_IRQACK;
2720 		sc->sc_wdcdev.irqack = pciide_irqack;
2721 	}
2722 	sc->sc_wdcdev.PIO_cap = 4;
2723 	sc->sc_wdcdev.DMA_cap = 2;
2724 	sc->sc_wdcdev.set_modes = cy693_setup_channel;
2725 
2726 	sc->sc_wdcdev.channels = sc->wdc_chanarray;
2727 	sc->sc_wdcdev.nchannels = 1;
2728 
2729 	/* Only one channel for this chip; if we are here it's enabled */
2730 	cp = &sc->pciide_channels[0];
2731 	sc->wdc_chanarray[0] = &cp->wdc_channel;
2732 	cp->name = PCIIDE_CHANNEL_NAME(0);
2733 	cp->wdc_channel.channel = 0;
2734 	cp->wdc_channel.wdc = &sc->sc_wdcdev;
2735 	cp->wdc_channel.ch_queue =
2736 	    malloc(sizeof(struct channel_queue), M_DEVBUF, M_NOWAIT);
2737 	if (cp->wdc_channel.ch_queue == NULL) {
2738 		printf(": cannot allocate memory for command queue\n");
2739 		return;
2740 	}
2741 	printf(", %s %s to ", PCIIDE_CHANNEL_NAME(0),
2742 	    (interface & PCIIDE_INTERFACE_SETTABLE(0)) ?
2743 	    "configured" : "wired");
2744 	if (interface & PCIIDE_INTERFACE_PCI(0)) {
2745 		printf("native-PCI\n");
2746 		cp->hw_ok = pciide_mapregs_native(pa, cp, &cmdsize, &ctlsize,
2747 		    pciide_pci_intr);
2748 	} else {
2749 		printf("compatibility\n");
2750 		cp->hw_ok = pciide_mapregs_compat(pa, cp, sc->sc_cy_compatchan,
2751 		    &cmdsize, &ctlsize);
2752 	}
2753 
2754 	cp->wdc_channel.data32iot = cp->wdc_channel.cmd_iot;
2755 	cp->wdc_channel.data32ioh = cp->wdc_channel.cmd_ioh;
2756 	pciide_map_compat_intr(pa, cp, sc->sc_cy_compatchan, interface);
2757 	if (cp->hw_ok == 0)
2758 		return;
2759 	wdcattach(&cp->wdc_channel);
2760 	if (pciide_chan_candisable(cp)) {
2761 		pci_conf_write(sc->sc_pc, sc->sc_tag,
2762 		    PCI_COMMAND_STATUS_REG, 0);
2763 	}
2764 	if (cp->hw_ok == 0) {
2765 		pciide_unmap_compat_intr(pa, cp, sc->sc_cy_compatchan,
2766 		    interface);
2767 		return;
2768 	}
2769 
2770 	WDCDEBUG_PRINT(("cy693_chip_map: old timings reg 0x%x\n",
2771 	    pci_conf_read(sc->sc_pc, sc->sc_tag, CY_CMD_CTRL)),DEBUG_PROBE);
2772 	cy693_setup_channel(&cp->wdc_channel);
2773 	WDCDEBUG_PRINT(("cy693_chip_map: new timings reg 0x%x\n",
2774 	    pci_conf_read(sc->sc_pc, sc->sc_tag, CY_CMD_CTRL)), DEBUG_PROBE);
2775 }
2776 
2777 void
2778 cy693_setup_channel(chp)
2779 	struct channel_softc *chp;
2780 {
2781 	struct ata_drive_datas *drvp;
2782 	int drive;
2783 	u_int32_t cy_cmd_ctrl;
2784 	u_int32_t idedma_ctl;
2785 	struct pciide_channel *cp = (struct pciide_channel*)chp;
2786 	struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
2787 	int dma_mode = -1;
2788 
2789 	cy_cmd_ctrl = idedma_ctl = 0;
2790 
2791 	/* setup DMA if needed */
2792 	pciide_channel_dma_setup(cp);
2793 
2794 	for (drive = 0; drive < 2; drive++) {
2795 		drvp = &chp->ch_drive[drive];
2796 		/* If no drive, skip */
2797 		if ((drvp->drive_flags & DRIVE) == 0)
2798 			continue;
2799 		/* add timing values, setup DMA if needed */
2800 		if (drvp->drive_flags & DRIVE_DMA) {
2801 			idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
2802 			/* use Multiword DMA */
2803 			if (dma_mode == -1 || dma_mode > drvp->DMA_mode)
2804 				dma_mode = drvp->DMA_mode;
2805 		}
2806 		cy_cmd_ctrl |= (cy_pio_pulse[drvp->PIO_mode] <<
2807 		    CY_CMD_CTRL_IOW_PULSE_OFF(drive));
2808 		cy_cmd_ctrl |= (cy_pio_rec[drvp->PIO_mode] <<
2809 		    CY_CMD_CTRL_IOW_REC_OFF(drive));
2810 		cy_cmd_ctrl |= (cy_pio_pulse[drvp->PIO_mode] <<
2811 		    CY_CMD_CTRL_IOR_PULSE_OFF(drive));
2812 		cy_cmd_ctrl |= (cy_pio_rec[drvp->PIO_mode] <<
2813 		    CY_CMD_CTRL_IOR_REC_OFF(drive));
2814 	}
2815 	pci_conf_write(sc->sc_pc, sc->sc_tag, CY_CMD_CTRL, cy_cmd_ctrl);
2816 	chp->ch_drive[0].DMA_mode = dma_mode;
2817 	chp->ch_drive[1].DMA_mode = dma_mode;
2818 
2819 	if (dma_mode == -1)
2820 		dma_mode = 0;
2821 
2822 	if (sc->sc_cy_handle != NULL) {
2823 		/* Note: `multiple' is implied. */
2824 		cy82c693_write(sc->sc_cy_handle,
2825 		    (sc->sc_cy_compatchan == 0) ?
2826 		    CY_DMA_IDX_PRIMARY : CY_DMA_IDX_SECONDARY, dma_mode);
2827 	}
2828 
2829 	pciide_print_modes(cp);
2830 
2831 	if (idedma_ctl != 0) {
2832 		/* Add software bits in status register */
2833 		bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
2834 		    IDEDMA_CTL, idedma_ctl);
2835 	}
2836 }
2837 
2838 void
2839 sis_chip_map(sc, pa)
2840 	struct pciide_softc *sc;
2841 	struct pci_attach_args *pa;
2842 {
2843 	struct pciide_channel *cp;
2844 	int channel;
2845 	u_int8_t sis_ctr0 = pciide_pci_read(sc->sc_pc, sc->sc_tag, SIS_CTRL0);
2846 	pcireg_t interface = PCI_INTERFACE(pa->pa_class);
2847 	pcireg_t rev = PCI_REVISION(pa->pa_class);
2848 	bus_size_t cmdsize, ctlsize;
2849 	pcitag_t pchb_tag;
2850 	pcireg_t pchb_id, pchb_class;
2851 
2852 	if (pciide_chipen(sc, pa) == 0)
2853 		return;
2854 
2855 	printf(": DMA");
2856 	pciide_mapreg_dma(sc, pa);
2857 
2858 	/* get a PCI tag for the host bridge (function 0 of the same device) */
2859 	pchb_tag = pci_make_tag(pa->pa_pc, pa->pa_bus, pa->pa_device, 0);
2860 	/* and read ID and rev of the ISA bridge */
2861 	pchb_id = pci_conf_read(sc->sc_pc, pchb_tag, PCI_ID_REG);
2862 	pchb_class = pci_conf_read(sc->sc_pc, pchb_tag, PCI_CLASS_REG);
2863 
2864 	sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
2865 	    WDC_CAPABILITY_MODE;
2866 	if (sc->sc_dma_ok) {
2867 		sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_IRQACK;
2868 		sc->sc_wdcdev.irqack = pciide_irqack;
2869 		/*
2870 		 * controllers with rev > 0xd0 support UDMA 2 at least
2871 		 * controllers associated to a rev 0x2 530 Host to PCI Bridge
2872 		 * have problems with UDMA
2873 		 */
2874 		if (rev >= 0xd0 &&
2875 		    (PCI_PRODUCT(pchb_id) != PCI_PRODUCT_SIS_SiS530 ||
2876 		    PCI_REVISION(pchb_class) >= 0x03))
2877 			sc->sc_wdcdev.cap |= WDC_CAPABILITY_UDMA;
2878 	}
2879 
2880 	sc->sc_wdcdev.PIO_cap = 4;
2881 	sc->sc_wdcdev.DMA_cap = 2;
2882 	if (sc->sc_wdcdev.cap & WDC_CAPABILITY_UDMA)
2883 		sc->sc_wdcdev.UDMA_cap = 2;
2884 	sc->sc_wdcdev.set_modes = sis_setup_channel;
2885 
2886 	sc->sc_wdcdev.channels = sc->wdc_chanarray;
2887 	sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
2888 
2889 	pciide_print_channels(sc->sc_wdcdev.nchannels, interface);
2890 
2891 	pciide_pci_write(sc->sc_pc, sc->sc_tag, SIS_MISC,
2892 	    pciide_pci_read(sc->sc_pc, sc->sc_tag, SIS_MISC) |
2893 	    SIS_MISC_TIM_SEL | SIS_MISC_FIFO_SIZE);
2894 
2895 	for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
2896 		cp = &sc->pciide_channels[channel];
2897 		if (pciide_chansetup(sc, channel, interface) == 0)
2898 			continue;
2899 		if ((channel == 0 && (sis_ctr0 & SIS_CTRL0_CHAN0_EN) == 0) ||
2900 	 	    (channel == 1 && (sis_ctr0 & SIS_CTRL0_CHAN1_EN) == 0)) {
2901 			printf("%s: %s ignored (disabled)\n",
2902 			    sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
2903 			continue;
2904 		}
2905 		pciide_map_compat_intr(pa, cp, channel, interface);
2906 		if (cp->hw_ok == 0)
2907 			continue;
2908 		pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize,
2909 		    pciide_pci_intr);
2910 		if (cp->hw_ok == 0) {
2911 			pciide_unmap_compat_intr(pa, cp, channel, interface);
2912 			continue;
2913 		}
2914 		if (pciide_chan_candisable(cp)) {
2915 			if (channel == 0)
2916 				sis_ctr0 &= ~SIS_CTRL0_CHAN0_EN;
2917 			else
2918 				sis_ctr0 &= ~SIS_CTRL0_CHAN1_EN;
2919 			pciide_pci_write(sc->sc_pc, sc->sc_tag, SIS_CTRL0,
2920 			    sis_ctr0);
2921 		}
2922 		if (cp->hw_ok == 0) {
2923 			pciide_unmap_compat_intr(pa, cp, channel, interface);
2924 			continue;
2925 		}
2926 		sis_setup_channel(&cp->wdc_channel);
2927 	}
2928 }
2929 
2930 void
2931 sis_setup_channel(chp)
2932 	struct channel_softc *chp;
2933 {
2934 	struct ata_drive_datas *drvp;
2935 	int drive;
2936 	u_int32_t sis_tim;
2937 	u_int32_t idedma_ctl;
2938 	struct pciide_channel *cp = (struct pciide_channel*)chp;
2939 	struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
2940 
2941 	WDCDEBUG_PRINT(("sis_setup_channel: old timings reg for "
2942 	    "channel %d 0x%x\n", chp->channel,
2943 	    pci_conf_read(sc->sc_pc, sc->sc_tag, SIS_TIM(chp->channel))),
2944 	    DEBUG_PROBE);
2945 	sis_tim = 0;
2946 	idedma_ctl = 0;
2947 	/* setup DMA if needed */
2948 	pciide_channel_dma_setup(cp);
2949 
2950 	for (drive = 0; drive < 2; drive++) {
2951 		drvp = &chp->ch_drive[drive];
2952 		/* If no drive, skip */
2953 		if ((drvp->drive_flags & DRIVE) == 0)
2954 			continue;
2955 		/* add timing values, setup DMA if needed */
2956 		if ((drvp->drive_flags & DRIVE_DMA) == 0 &&
2957 		    (drvp->drive_flags & DRIVE_UDMA) == 0)
2958 			goto pio;
2959 
2960 		if (drvp->drive_flags & DRIVE_UDMA) {
2961 			/* use Ultra/DMA */
2962 			drvp->drive_flags &= ~DRIVE_DMA;
2963 			sis_tim |= sis_udma_tim[drvp->UDMA_mode] <<
2964 			    SIS_TIM_UDMA_TIME_OFF(drive);
2965 			sis_tim |= SIS_TIM_UDMA_EN(drive);
2966 		} else {
2967 			/*
2968 			 * use Multiword DMA
2969 			 * Timings will be used for both PIO and DMA,
2970 			 * so adjust DMA mode if needed
2971 			 */
2972 			if (drvp->PIO_mode > (drvp->DMA_mode + 2))
2973 				drvp->PIO_mode = drvp->DMA_mode + 2;
2974 			if (drvp->DMA_mode + 2 > (drvp->PIO_mode))
2975 				drvp->DMA_mode = (drvp->PIO_mode > 2) ?
2976 				    drvp->PIO_mode - 2 : 0;
2977 			if (drvp->DMA_mode == 0)
2978 				drvp->PIO_mode = 0;
2979 		}
2980 		idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
2981 pio:		sis_tim |= sis_pio_act[drvp->PIO_mode] <<
2982 		    SIS_TIM_ACT_OFF(drive);
2983 		sis_tim |= sis_pio_rec[drvp->PIO_mode] <<
2984 		    SIS_TIM_REC_OFF(drive);
2985 	}
2986 	WDCDEBUG_PRINT(("sis_setup_channel: new timings reg for "
2987 	    "channel %d 0x%x\n", chp->channel, sis_tim), DEBUG_PROBE);
2988 	pci_conf_write(sc->sc_pc, sc->sc_tag, SIS_TIM(chp->channel), sis_tim);
2989 	if (idedma_ctl != 0) {
2990 		/* Add software bits in status register */
2991 		bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
2992 		    IDEDMA_CTL, idedma_ctl);
2993 	}
2994 	pciide_print_modes(cp);
2995 }
2996 
2997 void
2998 acer_chip_map(sc, pa)
2999 	struct pciide_softc *sc;
3000 	struct pci_attach_args *pa;
3001 {
3002 	struct pciide_channel *cp;
3003 	int channel;
3004 	pcireg_t cr, interface;
3005 	bus_size_t cmdsize, ctlsize;
3006 	pcireg_t rev = PCI_REVISION(pa->pa_class);
3007 
3008 	if (pciide_chipen(sc, pa) == 0)
3009 		return;
3010 
3011 	printf(": DMA");
3012 	pciide_mapreg_dma(sc, pa);
3013 	sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
3014 	    WDC_CAPABILITY_MODE;
3015 
3016 	if (rev < 0xC4)
3017 		sc->sc_wdcdev.cap |= WDC_CAPABILITY_NO_ATAPI_DMA;
3018 
3019 	if (sc->sc_dma_ok) {
3020 		sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA;
3021 		if (rev >= 0x20) {
3022 			sc->sc_wdcdev.cap |= WDC_CAPABILITY_UDMA;
3023 			if (rev >= 0xC4)
3024 				sc->sc_wdcdev.UDMA_cap = 5;
3025 			else if (rev >= 0xC2)
3026 				sc->sc_wdcdev.UDMA_cap = 4;
3027 			else
3028 				sc->sc_wdcdev.UDMA_cap = 2;
3029 		}
3030 		sc->sc_wdcdev.cap |= WDC_CAPABILITY_IRQACK;
3031 		sc->sc_wdcdev.irqack = pciide_irqack;
3032 	}
3033 
3034 	sc->sc_wdcdev.PIO_cap = 4;
3035 	sc->sc_wdcdev.DMA_cap = 2;
3036 	sc->sc_wdcdev.set_modes = acer_setup_channel;
3037 	sc->sc_wdcdev.channels = sc->wdc_chanarray;
3038 	sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
3039 
3040 	pciide_pci_write(sc->sc_pc, sc->sc_tag, ACER_CDRC,
3041 	    (pciide_pci_read(sc->sc_pc, sc->sc_tag, ACER_CDRC) |
3042 		ACER_CDRC_DMA_EN) & ~ACER_CDRC_FIFO_DISABLE);
3043 
3044 	/* Enable "microsoft register bits" R/W. */
3045 	pciide_pci_write(sc->sc_pc, sc->sc_tag, ACER_CCAR3,
3046 	    pciide_pci_read(sc->sc_pc, sc->sc_tag, ACER_CCAR3) | ACER_CCAR3_PI);
3047 	pciide_pci_write(sc->sc_pc, sc->sc_tag, ACER_CCAR1,
3048 	    pciide_pci_read(sc->sc_pc, sc->sc_tag, ACER_CCAR1) &
3049 	    ~(ACER_CHANSTATUS_RO|PCIIDE_CHAN_RO(0)|PCIIDE_CHAN_RO(1)));
3050 	pciide_pci_write(sc->sc_pc, sc->sc_tag, ACER_CCAR2,
3051 	    pciide_pci_read(sc->sc_pc, sc->sc_tag, ACER_CCAR2) &
3052 	    ~ACER_CHANSTATUSREGS_RO);
3053 	cr = pci_conf_read(sc->sc_pc, sc->sc_tag, PCI_CLASS_REG);
3054 	cr |= (PCIIDE_CHANSTATUS_EN << PCI_INTERFACE_SHIFT);
3055 	pci_conf_write(sc->sc_pc, sc->sc_tag, PCI_CLASS_REG, cr);
3056 	/* Don't use cr, re-read the real register content instead */
3057 	interface = PCI_INTERFACE(pci_conf_read(sc->sc_pc, sc->sc_tag,
3058 	    PCI_CLASS_REG));
3059 
3060 	pciide_print_channels(sc->sc_wdcdev.nchannels, interface);
3061 
3062 	/* From linux: enable "Cable Detection" */
3063 	if (rev >= 0xC2) {
3064 		pciide_pci_write(sc->sc_pc, sc->sc_tag, ACER_0x4B,
3065 		    pciide_pci_read(sc->sc_pc, sc->sc_tag, ACER_0x4B)
3066 		    | ACER_0x4B_CDETECT);
3067 		/* set south-bridge's enable bit, m1533, 0x79 */
3068 		if (rev == 0xC2)
3069 			/* 1543C-B0 (m1533, 0x79, bit 2) */
3070 			pciide_pci_write(sc->sc_pc, sc->sc_tag, ACER_0x79,
3071 			    pciide_pci_read(sc->sc_pc, sc->sc_tag, ACER_0x79)
3072 			    | ACER_0x79_REVC2_EN);
3073 		else
3074 			/* 1553/1535 (m1533, 0x79, bit 1) */
3075 			pciide_pci_write(sc->sc_pc, sc->sc_tag, ACER_0x79,
3076 			    pciide_pci_read(sc->sc_pc, sc->sc_tag, ACER_0x79)
3077 			    | ACER_0x79_EN);
3078 	}
3079 
3080 	for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
3081 		cp = &sc->pciide_channels[channel];
3082 		if (pciide_chansetup(sc, channel, interface) == 0)
3083 			continue;
3084 		if ((interface & PCIIDE_CHAN_EN(channel)) == 0) {
3085 			printf("%s: %s ignored (disabled)\n",
3086 			    sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
3087 			continue;
3088 		}
3089 		pciide_map_compat_intr(pa, cp, channel, interface);
3090 		if (cp->hw_ok == 0)
3091 			continue;
3092 		pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize,
3093 		    (rev >= 0xC2) ? pciide_pci_intr : acer_pci_intr);
3094 		if (cp->hw_ok == 0) {
3095 			pciide_unmap_compat_intr(pa, cp, channel, interface);
3096 			continue;
3097 		}
3098 		if (pciide_chan_candisable(cp)) {
3099 			cr &= ~(PCIIDE_CHAN_EN(channel) << PCI_INTERFACE_SHIFT);
3100 			pci_conf_write(sc->sc_pc, sc->sc_tag,
3101 			    PCI_CLASS_REG, cr);
3102 		}
3103 		if (cp->hw_ok == 0) {
3104 			pciide_unmap_compat_intr(pa, cp, channel, interface);
3105 			continue;
3106 		}
3107 		acer_setup_channel(&cp->wdc_channel);
3108 	}
3109 }
3110 
3111 void
3112 acer_setup_channel(chp)
3113 	struct channel_softc *chp;
3114 {
3115 	struct ata_drive_datas *drvp;
3116 	int drive;
3117 	u_int32_t acer_fifo_udma;
3118 	u_int32_t idedma_ctl;
3119 	struct pciide_channel *cp = (struct pciide_channel*)chp;
3120 	struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
3121 
3122 	idedma_ctl = 0;
3123 	acer_fifo_udma = pci_conf_read(sc->sc_pc, sc->sc_tag, ACER_FTH_UDMA);
3124 	WDCDEBUG_PRINT(("acer_setup_channel: old fifo/udma reg 0x%x\n",
3125 	    acer_fifo_udma), DEBUG_PROBE);
3126 	/* setup DMA if needed */
3127 	pciide_channel_dma_setup(cp);
3128 
3129 	if ((chp->ch_drive[0].drive_flags | chp->ch_drive[1].drive_flags) &
3130 	    DRIVE_UDMA)	{	/* check 80 pins cable */
3131 		if (pciide_pci_read(sc->sc_pc, sc->sc_tag, ACER_0x4A) &
3132 		    ACER_0x4A_80PIN(chp->channel)) {
3133 			if (chp->ch_drive[0].UDMA_mode > 2)
3134 				chp->ch_drive[0].UDMA_mode = 2;
3135 			if (chp->ch_drive[1].UDMA_mode > 2)
3136 				chp->ch_drive[1].UDMA_mode = 2;
3137 		}
3138 	}
3139 
3140 	for (drive = 0; drive < 2; drive++) {
3141 		drvp = &chp->ch_drive[drive];
3142 		/* If no drive, skip */
3143 		if ((drvp->drive_flags & DRIVE) == 0)
3144 			continue;
3145 		WDCDEBUG_PRINT(("acer_setup_channel: old timings reg for "
3146 		    "channel %d drive %d 0x%x\n", chp->channel, drive,
3147 		    pciide_pci_read(sc->sc_pc, sc->sc_tag,
3148 		    ACER_IDETIM(chp->channel, drive))), DEBUG_PROBE);
3149 		/* clear FIFO/DMA mode */
3150 		acer_fifo_udma &= ~(ACER_FTH_OPL(chp->channel, drive, 0x3) |
3151 		    ACER_UDMA_EN(chp->channel, drive) |
3152 		    ACER_UDMA_TIM(chp->channel, drive, 0x7));
3153 
3154 		/* add timing values, setup DMA if needed */
3155 		if ((drvp->drive_flags & DRIVE_DMA) == 0 &&
3156 		    (drvp->drive_flags & DRIVE_UDMA) == 0) {
3157 			acer_fifo_udma |=
3158 			    ACER_FTH_OPL(chp->channel, drive, 0x1);
3159 			goto pio;
3160 		}
3161 
3162 		acer_fifo_udma |= ACER_FTH_OPL(chp->channel, drive, 0x2);
3163 		if (drvp->drive_flags & DRIVE_UDMA) {
3164 			/* use Ultra/DMA */
3165 			drvp->drive_flags &= ~DRIVE_DMA;
3166 			acer_fifo_udma |= ACER_UDMA_EN(chp->channel, drive);
3167 			acer_fifo_udma |=
3168 			    ACER_UDMA_TIM(chp->channel, drive,
3169 				acer_udma[drvp->UDMA_mode]);
3170 			/* XXX disable if one drive < UDMA3 ? */
3171 			if (drvp->UDMA_mode >= 3) {
3172 				pciide_pci_write(sc->sc_pc, sc->sc_tag,
3173 				    ACER_0x4B,
3174 				    pciide_pci_read(sc->sc_pc, sc->sc_tag,
3175 				        ACER_0x4B) | ACER_0x4B_UDMA66);
3176 			}
3177 		} else {
3178 			/*
3179 			 * use Multiword DMA
3180 			 * Timings will be used for both PIO and DMA,
3181 			 * so adjust DMA mode if needed
3182 			 */
3183 			if (drvp->PIO_mode > (drvp->DMA_mode + 2))
3184 				drvp->PIO_mode = drvp->DMA_mode + 2;
3185 			if (drvp->DMA_mode + 2 > (drvp->PIO_mode))
3186 				drvp->DMA_mode = (drvp->PIO_mode > 2) ?
3187 				    drvp->PIO_mode - 2 : 0;
3188 			if (drvp->DMA_mode == 0)
3189 				drvp->PIO_mode = 0;
3190 		}
3191 		idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
3192 pio:		pciide_pci_write(sc->sc_pc, sc->sc_tag,
3193 		    ACER_IDETIM(chp->channel, drive),
3194 		    acer_pio[drvp->PIO_mode]);
3195 	}
3196 	WDCDEBUG_PRINT(("acer_setup_channel: new fifo/udma reg 0x%x\n",
3197 	    acer_fifo_udma), DEBUG_PROBE);
3198 	pci_conf_write(sc->sc_pc, sc->sc_tag, ACER_FTH_UDMA, acer_fifo_udma);
3199 	if (idedma_ctl != 0) {
3200 		/* Add software bits in status register */
3201 		bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
3202 		    IDEDMA_CTL, idedma_ctl);
3203 	}
3204 	pciide_print_modes(cp);
3205 }
3206 
3207 int
3208 acer_pci_intr(arg)
3209 	void *arg;
3210 {
3211 	struct pciide_softc *sc = arg;
3212 	struct pciide_channel *cp;
3213 	struct channel_softc *wdc_cp;
3214 	int i, rv, crv;
3215 	u_int32_t chids;
3216 
3217 	rv = 0;
3218 	chids = pciide_pci_read(sc->sc_pc, sc->sc_tag, ACER_CHIDS);
3219 	for (i = 0; i < sc->sc_wdcdev.nchannels; i++) {
3220 		cp = &sc->pciide_channels[i];
3221 		wdc_cp = &cp->wdc_channel;
3222 		/* If a compat channel skip. */
3223 		if (cp->compat)
3224 			continue;
3225 		if (chids & ACER_CHIDS_INT(i)) {
3226 			crv = wdcintr(wdc_cp);
3227 			if (crv == 0)
3228 				printf("%s:%d: bogus intr\n",
3229 				    sc->sc_wdcdev.sc_dev.dv_xname, i);
3230 			else
3231 				rv = 1;
3232 		}
3233 	}
3234 	return rv;
3235 }
3236 
3237 void
3238 hpt_chip_map(sc, pa)
3239 	struct pciide_softc *sc;
3240 	struct pci_attach_args *pa;
3241 {
3242 	struct pciide_channel *cp;
3243 	int i, compatchan, revision;
3244 	pcireg_t interface;
3245 	bus_size_t cmdsize, ctlsize;
3246 
3247 	if (pciide_chipen(sc, pa) == 0)
3248 		return;
3249 	revision = PCI_REVISION(pa->pa_class);
3250 
3251 	/*
3252 	 * when the chip is in native mode it identifies itself as a
3253 	 * 'misc mass storage'. Fake interface in this case.
3254 	 */
3255 	if (PCI_SUBCLASS(pa->pa_class) == PCI_SUBCLASS_MASS_STORAGE_IDE) {
3256 		interface = PCI_INTERFACE(pa->pa_class);
3257 	} else {
3258 		interface = PCIIDE_INTERFACE_BUS_MASTER_DMA |
3259 		    PCIIDE_INTERFACE_PCI(0);
3260 		if (revision == HPT370_REV || revision == HPT370A_REV)
3261 			interface |= PCIIDE_INTERFACE_PCI(1);
3262 	}
3263 
3264 	printf(": DMA");
3265 	pciide_mapreg_dma(sc, pa);
3266 	printf("\n");
3267 	sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
3268 	    WDC_CAPABILITY_MODE;
3269 	if (sc->sc_dma_ok) {
3270 		sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_UDMA;
3271 		sc->sc_wdcdev.cap |= WDC_CAPABILITY_IRQACK;
3272 		sc->sc_wdcdev.irqack = pciide_irqack;
3273 	}
3274 	sc->sc_wdcdev.PIO_cap = 4;
3275 	sc->sc_wdcdev.DMA_cap = 2;
3276 
3277 	sc->sc_wdcdev.set_modes = hpt_setup_channel;
3278 	sc->sc_wdcdev.channels = sc->wdc_chanarray;
3279 	if (revision < HPT370_REV) {
3280 		sc->sc_wdcdev.UDMA_cap = 4;
3281 		/*
3282 		 * The 366 has 2 PCI IDE functions, one for primary and one
3283 		 * for secondary. So we need to call pciide_mapregs_compat()
3284 		 * with the real channel
3285 		 */
3286 		if (pa->pa_function == 0) {
3287 			compatchan = 0;
3288 		} else if (pa->pa_function == 1) {
3289 			compatchan = 1;
3290 		} else {
3291 			printf("%s: unexpected PCI function %d\n",
3292 			    sc->sc_wdcdev.sc_dev.dv_xname, pa->pa_function);
3293 			return;
3294 		}
3295 		sc->sc_wdcdev.nchannels = 1;
3296 	} else {
3297 		sc->sc_wdcdev.nchannels = 2;
3298 		sc->sc_wdcdev.UDMA_cap = 5;
3299 	}
3300 	 for (i = 0; i < sc->sc_wdcdev.nchannels; i++) {
3301 		cp = &sc->pciide_channels[i];
3302 		if (sc->sc_wdcdev.nchannels > 1) {
3303 			compatchan = i;
3304 			if((pciide_pci_read(sc->sc_pc, sc->sc_tag,
3305 			    HPT370_CTRL1(i)) & HPT370_CTRL1_EN) == 0) {
3306 				printf("%s: %s ignored (disabled)\n",
3307 				    sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
3308 				continue;
3309 			}
3310 		}
3311 		if (pciide_chansetup(sc, i, interface) == 0)
3312 			continue;
3313 		if (interface & PCIIDE_INTERFACE_PCI(i)) {
3314 			cp->hw_ok = pciide_mapregs_native(pa, cp, &cmdsize,
3315 			    &ctlsize, hpt_pci_intr);
3316 		} else {
3317 			cp->hw_ok = pciide_mapregs_compat(pa, cp, compatchan,
3318 			    &cmdsize, &ctlsize);
3319 		}
3320 		if (cp->hw_ok == 0)
3321 			return;
3322 		cp->wdc_channel.data32iot = cp->wdc_channel.cmd_iot;
3323 		cp->wdc_channel.data32ioh = cp->wdc_channel.cmd_ioh;
3324 		wdcattach(&cp->wdc_channel);
3325 		hpt_setup_channel(&cp->wdc_channel);
3326 	}
3327 	if (revision == HPT370_REV || revision == HPT370A_REV) {
3328 		/*
3329 		 * HPT370_REV has a bit to disable interrupts, make sure
3330 		 * to clear it
3331 		 */
3332 		pciide_pci_write(sc->sc_pc, sc->sc_tag, HPT_CSEL,
3333 		    pciide_pci_read(sc->sc_pc, sc->sc_tag, HPT_CSEL) &
3334 		    ~HPT_CSEL_IRQDIS);
3335 	}
3336 	return;
3337 }
3338 
3339 void
3340 hpt_setup_channel(chp)
3341 	struct channel_softc *chp;
3342 {
3343 	struct ata_drive_datas *drvp;
3344 	int drive;
3345 	int cable;
3346 	u_int32_t before, after;
3347 	u_int32_t idedma_ctl;
3348 	struct pciide_channel *cp = (struct pciide_channel*)chp;
3349 	struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
3350 
3351 	cable = pciide_pci_read(sc->sc_pc, sc->sc_tag, HPT_CSEL);
3352 
3353 	/* setup DMA if needed */
3354 	pciide_channel_dma_setup(cp);
3355 
3356 	idedma_ctl = 0;
3357 
3358 	/* Per drive settings */
3359 	for (drive = 0; drive < 2; drive++) {
3360 		drvp = &chp->ch_drive[drive];
3361 		/* If no drive, skip */
3362 		if ((drvp->drive_flags & DRIVE) == 0)
3363 			continue;
3364 		before = pci_conf_read(sc->sc_pc, sc->sc_tag,
3365 					HPT_IDETIM(chp->channel, drive));
3366 
3367 		/* add timing values, setup DMA if needed */
3368 		if (drvp->drive_flags & DRIVE_UDMA) {
3369 			/* use Ultra/DMA */
3370 			drvp->drive_flags &= ~DRIVE_DMA;
3371 			if ((cable & HPT_CSEL_CBLID(chp->channel)) != 0 &&
3372 			    drvp->UDMA_mode > 2)
3373 				drvp->UDMA_mode = 2;
3374 			after = (sc->sc_wdcdev.nchannels == 2) ?
3375 			    hpt370_udma[drvp->UDMA_mode] :
3376 			    hpt366_udma[drvp->UDMA_mode];
3377 			idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
3378 		} else if (drvp->drive_flags & DRIVE_DMA) {
3379 			/*
3380 			 * use Multiword DMA.
3381 			 * Timings will be used for both PIO and DMA, so adjust
3382 			 * DMA mode if needed
3383 			 */
3384 			if (drvp->PIO_mode >= 3 &&
3385 			    (drvp->DMA_mode + 2) > drvp->PIO_mode) {
3386 				drvp->DMA_mode = drvp->PIO_mode - 2;
3387 			}
3388 			after = (sc->sc_wdcdev.nchannels == 2) ?
3389 			    hpt370_dma[drvp->DMA_mode] :
3390 			    hpt366_dma[drvp->DMA_mode];
3391 			idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
3392 		} else {
3393 			/* PIO only */
3394 			after = (sc->sc_wdcdev.nchannels == 2) ?
3395 			    hpt370_pio[drvp->PIO_mode] :
3396 			    hpt366_pio[drvp->PIO_mode];
3397 		}
3398 		pci_conf_write(sc->sc_pc, sc->sc_tag,
3399 		    HPT_IDETIM(chp->channel, drive), after);
3400 		WDCDEBUG_PRINT(("%s: bus speed register set to 0x%08x "
3401 		    "(BIOS 0x%08x)\n", sc->sc_wdcdev.sc_dev.dv_xname,
3402 		    after, before), DEBUG_PROBE);
3403 	}
3404 	if (idedma_ctl != 0) {
3405 		/* Add software bits in status register */
3406 		bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
3407 		    IDEDMA_CTL, idedma_ctl);
3408 	}
3409 	pciide_print_modes(cp);
3410 }
3411 
3412 int
3413 hpt_pci_intr(arg)
3414 	void *arg;
3415 {
3416 	struct pciide_softc *sc = arg;
3417 	struct pciide_channel *cp;
3418 	struct channel_softc *wdc_cp;
3419 	int rv = 0;
3420 	int dmastat, i, crv;
3421 
3422 	for (i = 0; i < sc->sc_wdcdev.nchannels; i++) {
3423 		dmastat = bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh,
3424 		    IDEDMA_CTL + IDEDMA_SCH_OFFSET * i);
3425 		if ((dmastat & IDEDMA_CTL_INTR) == 0)
3426 		    continue;
3427 		cp = &sc->pciide_channels[i];
3428 		wdc_cp = &cp->wdc_channel;
3429 		crv = wdcintr(wdc_cp);
3430 		if (crv == 0) {
3431 			printf("%s:%d: bogus intr\n",
3432 			    sc->sc_wdcdev.sc_dev.dv_xname, i);
3433 			bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
3434 			    IDEDMA_CTL + IDEDMA_SCH_OFFSET * i, dmastat);
3435 		} else
3436 			rv = 1;
3437 	}
3438 	return rv;
3439 }
3440 
3441 
3442 /* Macros to test product */
3443 #define PDC_IS_262(sc)							\
3444 	((sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_PDC20262 ||	\
3445 	(sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_PDC20265  ||	\
3446 	(sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_PDC20267)
3447 #define PDC_IS_265(sc)							\
3448 	((sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_PDC20265 ||	\
3449         (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_PDC20267)
3450 
3451 
3452 
3453 void
3454 pdc202xx_chip_map(sc, pa)
3455 	struct pciide_softc *sc;
3456 	struct pci_attach_args *pa;
3457 {
3458 	struct pciide_channel *cp;
3459 	int channel;
3460 	pcireg_t interface, st, mode;
3461 	bus_size_t cmdsize, ctlsize;
3462 
3463 	st = pci_conf_read(sc->sc_pc, sc->sc_tag, PDC2xx_STATE);
3464 	WDCDEBUG_PRINT(("pdc202xx_setup_chip: controller state 0x%x\n", st),
3465 	    DEBUG_PROBE);
3466 	if (pciide_chipen(sc, pa) == 0)
3467 		return;
3468 
3469 	/* turn off  RAID mode */
3470 	st &= ~PDC2xx_STATE_IDERAID;
3471 
3472 	/*
3473  	 * can't rely on the PCI_CLASS_REG content if the chip was in raid
3474 	 * mode. We have to fake interface
3475 	 */
3476 	interface = PCIIDE_INTERFACE_SETTABLE(0) | PCIIDE_INTERFACE_SETTABLE(1);
3477 	if (st & PDC2xx_STATE_NATIVE)
3478 		interface |= PCIIDE_INTERFACE_PCI(0) | PCIIDE_INTERFACE_PCI(1);
3479 
3480 	printf(": DMA");
3481 	pciide_mapreg_dma(sc, pa);
3482 
3483 	sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
3484 	    WDC_CAPABILITY_MODE | WDC_CAPABILITY_NO_ATAPI_DMA;
3485 	if (sc->sc_dma_ok) {
3486 		sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_UDMA;
3487 		sc->sc_wdcdev.cap |= WDC_CAPABILITY_IRQACK;
3488 		sc->sc_wdcdev.irqack = pciide_irqack;
3489 	}
3490 	sc->sc_wdcdev.PIO_cap = 4;
3491 	sc->sc_wdcdev.DMA_cap = 2;
3492 	if (PDC_IS_265(sc))
3493 		sc->sc_wdcdev.UDMA_cap = 5;
3494 	else if (PDC_IS_262(sc))
3495 		sc->sc_wdcdev.UDMA_cap = 4;
3496 	else
3497 		sc->sc_wdcdev.UDMA_cap = 2;
3498 	sc->sc_wdcdev.set_modes = pdc202xx_setup_channel;
3499 	sc->sc_wdcdev.channels = sc->wdc_chanarray;
3500 	sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
3501 
3502 	pciide_print_channels(sc->sc_wdcdev.nchannels, interface);
3503 
3504 	/* setup failsafe defaults */
3505 	mode = 0;
3506 	mode = PDC2xx_TIM_SET_PA(mode, pdc2xx_pa[0]);
3507 	mode = PDC2xx_TIM_SET_PB(mode, pdc2xx_pb[0]);
3508 	mode = PDC2xx_TIM_SET_MB(mode, pdc2xx_dma_mb[0]);
3509 	mode = PDC2xx_TIM_SET_MC(mode, pdc2xx_dma_mc[0]);
3510 
3511 	for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
3512 		WDCDEBUG_PRINT(("pdc202xx_setup_chip: channel %d drive 0 "
3513 		    "initial timings  0x%x, now 0x%x\n", channel,
3514 		    pci_conf_read(sc->sc_pc, sc->sc_tag,
3515 		    PDC2xx_TIM(channel, 0)), mode | PDC2xx_TIM_IORDYp),
3516 		    DEBUG_PROBE);
3517 		pci_conf_write(sc->sc_pc, sc->sc_tag, PDC2xx_TIM(channel, 0),
3518 		    mode | PDC2xx_TIM_IORDYp);
3519 		WDCDEBUG_PRINT(("pdc202xx_setup_chip: channel %d drive 1 "
3520 		    "initial timings  0x%x, now 0x%x\n", channel,
3521 		pci_conf_read(sc->sc_pc, sc->sc_tag,
3522 	 	    PDC2xx_TIM(channel, 1)), mode), DEBUG_PROBE);
3523 		pci_conf_write(sc->sc_pc, sc->sc_tag, PDC2xx_TIM(channel, 1),
3524 		    mode);
3525 	}
3526 
3527 	mode = PDC2xx_SCR_DMA;
3528 	if (PDC_IS_262(sc)) {
3529 		mode = PDC2xx_SCR_SET_GEN(mode, PDC262_SCR_GEN_LAT);
3530 	} else {
3531 		/* the BIOS set it up this way */
3532 		mode = PDC2xx_SCR_SET_GEN(mode, 0x1);
3533 	}
3534 	mode = PDC2xx_SCR_SET_I2C(mode, 0x3); /* ditto */
3535 	mode = PDC2xx_SCR_SET_POLL(mode, 0x1); /* ditto */
3536 	WDCDEBUG_PRINT(("pdc202xx_setup_chip: initial SCR  0x%x, now 0x%x\n",
3537 	    bus_space_read_4(sc->sc_dma_iot, sc->sc_dma_ioh, PDC2xx_SCR), mode),
3538 	    DEBUG_PROBE);
3539 	bus_space_write_4(sc->sc_dma_iot, sc->sc_dma_ioh, PDC2xx_SCR, mode);
3540 
3541 	/* controller initial state register is OK even without BIOS */
3542 	/* Set DMA mode to IDE DMA compatibility */
3543 	mode = bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh, PDC2xx_PM);
3544 	WDCDEBUG_PRINT(("pdc202xx_setup_chip: primary mode 0x%x", mode ),
3545 	    DEBUG_PROBE);
3546 	bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh, PDC2xx_PM,
3547 	    mode | 0x1);
3548 	mode = bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh, PDC2xx_SM);
3549 	WDCDEBUG_PRINT((", secondary mode 0x%x\n", mode ), DEBUG_PROBE);
3550 	bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh, PDC2xx_SM,
3551 	    mode | 0x1);
3552 
3553 	for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
3554 		cp = &sc->pciide_channels[channel];
3555 		if (pciide_chansetup(sc, channel, interface) == 0)
3556 			continue;
3557 		if ((st & (PDC_IS_262(sc) ?
3558 		    PDC262_STATE_EN(channel):PDC246_STATE_EN(channel))) == 0) {
3559 			printf("%s: %s ignored (disabled)\n",
3560 			    sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
3561 			continue;
3562 		}
3563 		pciide_map_compat_intr(pa, cp, channel, interface);
3564 		if (cp->hw_ok == 0)
3565 			continue;
3566 		if (PDC_IS_265(sc))
3567 			pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize,
3568 			    pdc20265_pci_intr);
3569 		else
3570 			pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize,
3571 			    pdc202xx_pci_intr);
3572 		if (cp->hw_ok == 0) {
3573 			pciide_unmap_compat_intr(pa, cp, channel, interface);
3574 			continue;
3575 		}
3576 		if (pciide_chan_candisable(cp)) {
3577 			st &= ~(PDC_IS_262(sc) ?
3578 			    PDC262_STATE_EN(channel):PDC246_STATE_EN(channel));
3579 			pciide_unmap_compat_intr(pa, cp, channel, interface);
3580 		}
3581 		pdc202xx_setup_channel(&cp->wdc_channel);
3582         }
3583 	WDCDEBUG_PRINT(("pdc202xx_setup_chip: new controller state 0x%x\n", st),
3584 	    DEBUG_PROBE);
3585 	pci_conf_write(sc->sc_pc, sc->sc_tag, PDC2xx_STATE, st);
3586 	return;
3587 }
3588 
3589 void
3590 pdc202xx_setup_channel(chp)
3591 	struct channel_softc *chp;
3592 {
3593 	struct ata_drive_datas *drvp;
3594 	int drive;
3595 	pcireg_t mode, st;
3596 	u_int32_t idedma_ctl, scr, atapi;
3597 	struct pciide_channel *cp = (struct pciide_channel*)chp;
3598 	struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
3599 	int channel = chp->channel;
3600 
3601 	/* setup DMA if needed */
3602 	pciide_channel_dma_setup(cp);
3603 
3604 	idedma_ctl = 0;
3605 	WDCDEBUG_PRINT(("pdc202xx_setup_channel %s: scr 0x%x\n",
3606 	    sc->sc_wdcdev.sc_dev.dv_xname,
3607 	    bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh, PDC262_U66)),
3608 	    DEBUG_PROBE);
3609 
3610 	/* Per channel settings */
3611 	if (PDC_IS_262(sc)) {
3612 		scr = bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh,
3613 		    PDC262_U66);
3614 		st = pci_conf_read(sc->sc_pc, sc->sc_tag, PDC2xx_STATE);
3615 		/* Trim UDMA mode */
3616 		if ((st & PDC262_STATE_80P(channel)) != 0 ||
3617 		    (chp->ch_drive[0].drive_flags & DRIVE_UDMA &&
3618 		    chp->ch_drive[0].UDMA_mode <= 2) ||
3619 		    (chp->ch_drive[1].drive_flags & DRIVE_UDMA &&
3620 		    chp->ch_drive[1].UDMA_mode <= 2)) {
3621 			if (chp->ch_drive[0].UDMA_mode > 2)
3622 				chp->ch_drive[0].UDMA_mode = 2;
3623 			if (chp->ch_drive[1].UDMA_mode > 2)
3624 				chp->ch_drive[1].UDMA_mode = 2;
3625 		}
3626 		/* Set U66 if needed */
3627 		if ((chp->ch_drive[0].drive_flags & DRIVE_UDMA &&
3628 		    chp->ch_drive[0].UDMA_mode > 2) ||
3629 		    (chp->ch_drive[1].drive_flags & DRIVE_UDMA &&
3630 		    chp->ch_drive[1].UDMA_mode > 2))
3631 			scr |= PDC262_U66_EN(channel);
3632 		else
3633 			scr &= ~PDC262_U66_EN(channel);
3634 		bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
3635 		    PDC262_U66, scr);
3636 		WDCDEBUG_PRINT(("pdc202xx_setup_channel %s:%d: ATAPI 0x%x\n",
3637 		    sc->sc_wdcdev.sc_dev.dv_xname, channel,
3638 		    bus_space_read_4(sc->sc_dma_iot, sc->sc_dma_ioh,
3639 		    PDC262_ATAPI(channel))), DEBUG_PROBE);
3640 		if (chp->ch_drive[0].drive_flags & DRIVE_ATAPI ||
3641 		    chp->ch_drive[1].drive_flags & DRIVE_ATAPI) {
3642 			if (((chp->ch_drive[0].drive_flags & DRIVE_UDMA) &&
3643 			    !(chp->ch_drive[1].drive_flags & DRIVE_UDMA) &&
3644 			    (chp->ch_drive[1].drive_flags & DRIVE_DMA)) ||
3645 			    ((chp->ch_drive[1].drive_flags & DRIVE_UDMA) &&
3646 			    !(chp->ch_drive[0].drive_flags & DRIVE_UDMA) &&
3647 			    (chp->ch_drive[0].drive_flags & DRIVE_DMA)))
3648 				atapi = 0;
3649 			else
3650 				atapi = PDC262_ATAPI_UDMA;
3651 			bus_space_write_4(sc->sc_dma_iot, sc->sc_dma_ioh,
3652 			    PDC262_ATAPI(channel), atapi);
3653 		}
3654 	}
3655 	for (drive = 0; drive < 2; drive++) {
3656 		drvp = &chp->ch_drive[drive];
3657 		/* If no drive, skip */
3658 		if ((drvp->drive_flags & DRIVE) == 0)
3659 			continue;
3660 		mode = 0;
3661 		if (drvp->drive_flags & DRIVE_UDMA) {
3662 			/* use Ultra/DMA */
3663 			drvp->drive_flags &= ~DRIVE_DMA;
3664 			mode = PDC2xx_TIM_SET_MB(mode,
3665 			   pdc2xx_udma_mb[drvp->UDMA_mode]);
3666 			mode = PDC2xx_TIM_SET_MC(mode,
3667 			   pdc2xx_udma_mc[drvp->UDMA_mode]);
3668 			drvp->drive_flags &= ~DRIVE_DMA;
3669 			idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
3670 		} else if (drvp->drive_flags & DRIVE_DMA) {
3671 			mode = PDC2xx_TIM_SET_MB(mode,
3672 			    pdc2xx_dma_mb[drvp->DMA_mode]);
3673 			mode = PDC2xx_TIM_SET_MC(mode,
3674 			   pdc2xx_dma_mc[drvp->DMA_mode]);
3675 			idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
3676 		} else {
3677 			mode = PDC2xx_TIM_SET_MB(mode,
3678 			    pdc2xx_dma_mb[0]);
3679 			mode = PDC2xx_TIM_SET_MC(mode,
3680 			    pdc2xx_dma_mc[0]);
3681 		}
3682 		mode = PDC2xx_TIM_SET_PA(mode, pdc2xx_pa[drvp->PIO_mode]);
3683 		mode = PDC2xx_TIM_SET_PB(mode, pdc2xx_pb[drvp->PIO_mode]);
3684 		if (drvp->drive_flags & DRIVE_ATA)
3685 			mode |= PDC2xx_TIM_PRE;
3686 		mode |= PDC2xx_TIM_SYNC | PDC2xx_TIM_ERRDY;
3687 		if (drvp->PIO_mode >= 3) {
3688 			mode |= PDC2xx_TIM_IORDY;
3689 			if (drive == 0)
3690 				mode |= PDC2xx_TIM_IORDYp;
3691 		}
3692 		WDCDEBUG_PRINT(("pdc202xx_setup_channel: %s:%d:%d "
3693 		    "timings 0x%x\n",
3694 		    sc->sc_wdcdev.sc_dev.dv_xname,
3695 		    chp->channel, drive, mode), DEBUG_PROBE);
3696 		    pci_conf_write(sc->sc_pc, sc->sc_tag,
3697 		    PDC2xx_TIM(chp->channel, drive), mode);
3698 	}
3699 	if (idedma_ctl != 0) {
3700 		/* Add software bits in status register */
3701 		bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
3702 		    IDEDMA_CTL, idedma_ctl);
3703 	}
3704 	pciide_print_modes(cp);
3705 }
3706 
3707 int
3708 pdc202xx_pci_intr(arg)
3709 	void *arg;
3710 {
3711 	struct pciide_softc *sc = arg;
3712 	struct pciide_channel *cp;
3713 	struct channel_softc *wdc_cp;
3714 	int i, rv, crv;
3715 	u_int32_t scr;
3716 
3717 	rv = 0;
3718 	scr = bus_space_read_4(sc->sc_dma_iot, sc->sc_dma_ioh, PDC2xx_SCR);
3719 	for (i = 0; i < sc->sc_wdcdev.nchannels; i++) {
3720 		cp = &sc->pciide_channels[i];
3721 		wdc_cp = &cp->wdc_channel;
3722 		/* If a compat channel skip. */
3723 		if (cp->compat)
3724 			continue;
3725 		if (scr & PDC2xx_SCR_INT(i)) {
3726 			crv = wdcintr(wdc_cp);
3727 			if (crv == 0)
3728 				printf("%s:%d: bogus intr (reg 0x%x)\n",
3729 				    sc->sc_wdcdev.sc_dev.dv_xname, i, scr);
3730 			else
3731 				rv = 1;
3732 		}
3733         }
3734 	return rv;
3735 }
3736 
3737 int
3738 pdc20265_pci_intr(arg)
3739 	void *arg;
3740 {
3741 	struct pciide_softc *sc = arg;
3742 	struct pciide_channel *cp;
3743 	struct channel_softc *wdc_cp;
3744 	int i, rv, crv;
3745 	u_int32_t dmastat;
3746 
3747 	rv = 0;
3748 	for (i = 0; i < sc->sc_wdcdev.nchannels; i++) {
3749 		cp = &sc->pciide_channels[i];
3750 		wdc_cp = &cp->wdc_channel;
3751 		/* If a compat channel skip. */
3752 		if (cp->compat)
3753 			continue;
3754 		/*
3755 		 * The Ultra/100 seems to assert PDC2xx_SCR_INT * spuriously,
3756 		 * however it asserts INT in IDEDMA_CTL even for non-DMA ops.
3757 		 * So use it instead (requires 2 reg reads instead of 1,
3758 		 * but we can't do it another way).
3759 		 */
3760 		dmastat = bus_space_read_1(sc->sc_dma_iot,
3761 		    sc->sc_dma_ioh, IDEDMA_CTL + IDEDMA_SCH_OFFSET * i);
3762 		if((dmastat & IDEDMA_CTL_INTR) == 0)
3763 		    continue;
3764 
3765 		crv = wdcintr(wdc_cp);
3766 		if (crv == 0)
3767 			printf("%s:%d: bogus intr\n",
3768 			    sc->sc_wdcdev.sc_dev.dv_xname, i);
3769 		else
3770 			rv = 1;
3771 	}
3772 	return rv;
3773 }
3774 
3775 /*
3776  * Inline functions for accessing the timing registers of the
3777  * OPTi controller.
3778  *
3779  * These *MUST* disable interrupts as they need atomic access to
3780  * certain magic registers. Failure to adhere to this *will*
3781  * break things in subtle ways if the wdc registers are accessed
3782  * by an interrupt routine while this magic sequence is executing.
3783  */
3784 static __inline__ u_int8_t
3785 opti_read_config(struct channel_softc *chp, int reg)
3786 {
3787 	u_int8_t rv;
3788 	int s = splhigh();
3789 
3790 	/* Two consecutive 16-bit reads from register #1 (0x1f1/0x171) */
3791 	(void) bus_space_read_2(chp->cmd_iot, chp->cmd_ioh, wdr_features);
3792 	(void) bus_space_read_2(chp->cmd_iot, chp->cmd_ioh, wdr_features);
3793 
3794 	/* Followed by an 8-bit write of 0x3 to register #2 */
3795 	bus_space_write_1(chp->cmd_iot, chp->cmd_ioh, wdr_seccnt, 0x03u);
3796 
3797 	/* Now we can read the required register */
3798 	rv = bus_space_read_1(chp->cmd_iot, chp->cmd_ioh, reg);
3799 
3800 	/* Restore the real registers */
3801 	bus_space_write_1(chp->cmd_iot, chp->cmd_ioh, wdr_seccnt, 0x83u);
3802 
3803 	splx(s);
3804 
3805 	return rv;
3806 }
3807 
3808 static __inline__ void
3809 opti_write_config(struct channel_softc *chp, int reg, u_int8_t val)
3810 {
3811 	int s = splhigh();
3812 
3813 	/* Two consecutive 16-bit reads from register #1 (0x1f1/0x171) */
3814 	(void) bus_space_read_2(chp->cmd_iot, chp->cmd_ioh, wdr_features);
3815 	(void) bus_space_read_2(chp->cmd_iot, chp->cmd_ioh, wdr_features);
3816 
3817 	/* Followed by an 8-bit write of 0x3 to register #2 */
3818 	bus_space_write_1(chp->cmd_iot, chp->cmd_ioh, wdr_seccnt, 0x03u);
3819 
3820 	/* Now we can write the required register */
3821 	bus_space_write_1(chp->cmd_iot, chp->cmd_ioh, reg, val);
3822 
3823 	/* Restore the real registers */
3824 	bus_space_write_1(chp->cmd_iot, chp->cmd_ioh, wdr_seccnt, 0x83u);
3825 
3826 	splx(s);
3827 }
3828 
3829 void
3830 opti_chip_map(sc, pa)
3831 	struct pciide_softc *sc;
3832 	struct pci_attach_args *pa;
3833 {
3834 	struct pciide_channel *cp;
3835 	bus_size_t cmdsize, ctlsize;
3836 	pcireg_t interface;
3837 	u_int8_t init_ctrl;
3838 	int channel;
3839 
3840 	if (pciide_chipen(sc, pa) == 0)
3841 		return;
3842 	printf(": DMA");
3843 	/*
3844 	 * XXXSCW:
3845 	 * There seem to be a couple of buggy revisions/implementations
3846 	 * of the OPTi pciide chipset. This kludge seems to fix one of
3847 	 * the reported problems (NetBSD PR/11644) but still fails for the
3848 	 * other (NetBSD PR/13151), although the latter may be due to other
3849 	 * issues too...
3850 	 */
3851 	if (PCI_REVISION(pa->pa_class) <= 0x12) {
3852 		printf(" (disabled)");
3853 		sc->sc_dma_ok = 0;
3854 		sc->sc_wdcdev.cap = 0;
3855 	} else {
3856 		sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA32;
3857 		pciide_mapreg_dma(sc, pa);
3858 	}
3859 
3860 	sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_MODE;
3861 	sc->sc_wdcdev.PIO_cap = 4;
3862 	if (sc->sc_dma_ok) {
3863 		sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_IRQACK;
3864 		sc->sc_wdcdev.irqack = pciide_irqack;
3865 		sc->sc_wdcdev.DMA_cap = 2;
3866 	}
3867 	sc->sc_wdcdev.set_modes = opti_setup_channel;
3868 
3869 	sc->sc_wdcdev.channels = sc->wdc_chanarray;
3870 	sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
3871 
3872 	init_ctrl = pciide_pci_read(sc->sc_pc, sc->sc_tag,
3873 	    OPTI_REG_INIT_CONTROL);
3874 
3875 	interface = PCI_INTERFACE(pa->pa_class);
3876 
3877         pciide_print_channels(sc->sc_wdcdev.nchannels, interface);
3878 
3879 	for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
3880 		cp = &sc->pciide_channels[channel];
3881 		if (pciide_chansetup(sc, channel, interface) == 0)
3882 			continue;
3883 		if (channel == 1 &&
3884 		    (init_ctrl & OPTI_INIT_CONTROL_CH2_DISABLE) != 0) {
3885 			printf("%s: %s channel ignored (disabled)\n",
3886 			    sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
3887 			continue;
3888 		}
3889 		pciide_map_compat_intr(pa, cp, channel, interface);
3890 		if (cp->hw_ok == 0)
3891 			continue;
3892 		pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize,
3893 		    pciide_pci_intr);
3894 		if (cp->hw_ok == 0) {
3895 			pciide_unmap_compat_intr(pa, cp, channel, interface);
3896 			continue;
3897 		}
3898 		opti_setup_channel(&cp->wdc_channel);
3899 	}
3900 }
3901 
3902 void
3903 opti_setup_channel(chp)
3904 	struct channel_softc *chp;
3905 {
3906 	struct ata_drive_datas *drvp;
3907 	struct pciide_channel *cp = (struct pciide_channel*)chp;
3908 	struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
3909 	int drive,spd;
3910 	int mode[2];
3911 	u_int8_t rv, mr;
3912 
3913 	/*
3914 	 * The `Delay' and `Address Setup Time' fields of the
3915 	 * Miscellaneous Register are always zero initially.
3916 	 */
3917 	mr = opti_read_config(chp, OPTI_REG_MISC) & ~OPTI_MISC_INDEX_MASK;
3918 	mr &= ~(OPTI_MISC_DELAY_MASK |
3919 		OPTI_MISC_ADDR_SETUP_MASK |
3920 		OPTI_MISC_INDEX_MASK);
3921 
3922 	/* Prime the control register before setting timing values */
3923 	opti_write_config(chp, OPTI_REG_CONTROL, OPTI_CONTROL_DISABLE);
3924 
3925 	/* Determine the clockrate of the PCIbus the chip is attached to */
3926 	spd = (int) opti_read_config(chp, OPTI_REG_STRAP);
3927 	spd &= OPTI_STRAP_PCI_SPEED_MASK;
3928 
3929 	/* setup DMA if needed */
3930 	pciide_channel_dma_setup(cp);
3931 
3932 	for (drive = 0; drive < 2; drive++) {
3933 		drvp = &chp->ch_drive[drive];
3934 		/* If no drive, skip */
3935 		if ((drvp->drive_flags & DRIVE) == 0) {
3936 			mode[drive] = -1;
3937 			continue;
3938 		}
3939 
3940 		if ((drvp->drive_flags & DRIVE_DMA)) {
3941 			/*
3942 			 * Timings will be used for both PIO and DMA,
3943 			 * so adjust DMA mode if needed
3944 			 */
3945 			if (drvp->PIO_mode > (drvp->DMA_mode + 2))
3946 				drvp->PIO_mode = drvp->DMA_mode + 2;
3947 			if (drvp->DMA_mode + 2 > (drvp->PIO_mode))
3948 				drvp->DMA_mode = (drvp->PIO_mode > 2) ?
3949 				    drvp->PIO_mode - 2 : 0;
3950 			if (drvp->DMA_mode == 0)
3951 				drvp->PIO_mode = 0;
3952 
3953 			mode[drive] = drvp->DMA_mode + 5;
3954 		} else
3955 			mode[drive] = drvp->PIO_mode;
3956 
3957 		if (drive && mode[0] >= 0 &&
3958 		    (opti_tim_as[spd][mode[0]] != opti_tim_as[spd][mode[1]])) {
3959 			/*
3960 			 * Can't have two drives using different values
3961 			 * for `Address Setup Time'.
3962 			 * Slow down the faster drive to compensate.
3963 			 */
3964 			int d = (opti_tim_as[spd][mode[0]] >
3965 				 opti_tim_as[spd][mode[1]]) ?  0 : 1;
3966 
3967 			mode[d] = mode[1-d];
3968 			chp->ch_drive[d].PIO_mode = chp->ch_drive[1-d].PIO_mode;
3969 			chp->ch_drive[d].DMA_mode = 0;
3970 			chp->ch_drive[d].drive_flags &= DRIVE_DMA;
3971 		}
3972 	}
3973 
3974 	for (drive = 0; drive < 2; drive++) {
3975 		int m;
3976 		if ((m = mode[drive]) < 0)
3977 			continue;
3978 
3979 		/* Set the Address Setup Time and select appropriate index */
3980 		rv = opti_tim_as[spd][m] << OPTI_MISC_ADDR_SETUP_SHIFT;
3981 		rv |= OPTI_MISC_INDEX(drive);
3982 		opti_write_config(chp, OPTI_REG_MISC, mr | rv);
3983 
3984 		/* Set the pulse width and recovery timing parameters */
3985 		rv  = opti_tim_cp[spd][m] << OPTI_PULSE_WIDTH_SHIFT;
3986 		rv |= opti_tim_rt[spd][m] << OPTI_RECOVERY_TIME_SHIFT;
3987 		opti_write_config(chp, OPTI_REG_READ_CYCLE_TIMING, rv);
3988 		opti_write_config(chp, OPTI_REG_WRITE_CYCLE_TIMING, rv);
3989 
3990 		/* Set the Enhanced Mode register appropriately */
3991 	    	rv = pciide_pci_read(sc->sc_pc, sc->sc_tag, OPTI_REG_ENH_MODE);
3992 		rv &= ~OPTI_ENH_MODE_MASK(chp->channel, drive);
3993 		rv |= OPTI_ENH_MODE(chp->channel, drive, opti_tim_em[m]);
3994 		pciide_pci_write(sc->sc_pc, sc->sc_tag, OPTI_REG_ENH_MODE, rv);
3995 	}
3996 
3997 	/* Finally, enable the timings */
3998 	opti_write_config(chp, OPTI_REG_CONTROL, OPTI_CONTROL_ENABLE);
3999 
4000 	pciide_print_modes(cp);
4001 }
4002 
4003 
4004 #define	ACARD_IS_850(sc)							\
4005 	((sc)->sc_pp->ide_product == PCI_PRODUCT_ACARD_ATP850U)
4006 
4007 void
4008 acard_chip_map(sc, pa)
4009 	struct pciide_softc *sc;
4010 	struct pci_attach_args *pa;
4011 {
4012 	struct pciide_channel *cp;
4013 	int i;
4014 	pcireg_t interface;
4015 	bus_size_t cmdsize, ctlsize;
4016 
4017 	if (pciide_chipen(sc, pa) == 0)
4018 		return;
4019 
4020 	/*
4021 	 * when the chip is in native mode it identifies itself as a
4022 	 * 'misc mass storage'. Fake interface in this case.
4023 	 */
4024 	if (PCI_SUBCLASS(pa->pa_class) == PCI_SUBCLASS_MASS_STORAGE_IDE) {
4025 		interface = PCI_INTERFACE(pa->pa_class);
4026 	} else {
4027 		interface = PCIIDE_INTERFACE_BUS_MASTER_DMA |
4028 		    PCIIDE_INTERFACE_PCI(0) | PCIIDE_INTERFACE_PCI(1);
4029 	}
4030 
4031 	printf(": DMA");
4032 	pciide_mapreg_dma(sc, pa);
4033 	printf("\n");
4034 	sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
4035 	    WDC_CAPABILITY_MODE;
4036 
4037 	if (sc->sc_dma_ok) {
4038 		sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_UDMA;
4039 		sc->sc_wdcdev.cap |= WDC_CAPABILITY_IRQACK;
4040 		sc->sc_wdcdev.irqack = pciide_irqack;
4041 	}
4042 	sc->sc_wdcdev.PIO_cap = 4;
4043 	sc->sc_wdcdev.DMA_cap = 2;
4044 	sc->sc_wdcdev.UDMA_cap = ACARD_IS_850(sc) ? 2 : 4;
4045 
4046 	sc->sc_wdcdev.set_modes = acard_setup_channel;
4047 	sc->sc_wdcdev.channels = sc->wdc_chanarray;
4048 	sc->sc_wdcdev.nchannels = 2;
4049 
4050 	for (i = 0; i < sc->sc_wdcdev.nchannels; i++) {
4051 		cp = &sc->pciide_channels[i];
4052 		if (pciide_chansetup(sc, i, interface) == 0)
4053 			continue;
4054 		if (interface & PCIIDE_INTERFACE_PCI(i)) {
4055 			cp->hw_ok = pciide_mapregs_native(pa, cp, &cmdsize,
4056 			    &ctlsize, pciide_pci_intr);
4057 		} else {
4058 			cp->hw_ok = pciide_mapregs_compat(pa, cp, i,
4059 			    &cmdsize, &ctlsize);
4060 		}
4061 		if (cp->hw_ok == 0)
4062 			return;
4063 		cp->wdc_channel.data32iot = cp->wdc_channel.cmd_iot;
4064 		cp->wdc_channel.data32ioh = cp->wdc_channel.cmd_ioh;
4065 		wdcattach(&cp->wdc_channel);
4066 		acard_setup_channel(&cp->wdc_channel);
4067 	}
4068 	if (!ACARD_IS_850(sc)) {
4069 		u_int32_t reg;
4070 		reg = pci_conf_read(sc->sc_pc, sc->sc_tag, ATP8x0_CTRL);
4071 		reg &= ~ATP860_CTRL_INT;
4072 		pci_conf_write(sc->sc_pc, sc->sc_tag, ATP8x0_CTRL, reg);
4073 	}
4074 }
4075 
4076 void
4077 acard_setup_channel(chp)
4078 	struct channel_softc *chp;
4079 {
4080 	struct ata_drive_datas *drvp;
4081 	struct pciide_channel *cp = (struct pciide_channel*)chp;
4082 	struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
4083 	int channel = chp->channel;
4084 	int drive;
4085 	u_int32_t idetime, udma_mode;
4086 	u_int32_t idedma_ctl;
4087 
4088 	/* setup DMA if needed */
4089 	pciide_channel_dma_setup(cp);
4090 
4091 	if (ACARD_IS_850(sc)) {
4092 		idetime = 0;
4093 		udma_mode = pci_conf_read(sc->sc_pc, sc->sc_tag, ATP850_UDMA);
4094 		udma_mode &= ~ATP850_UDMA_MASK(channel);
4095 	} else {
4096 		idetime = pci_conf_read(sc->sc_pc, sc->sc_tag, ATP860_IDETIME);
4097 		idetime &= ~ATP860_SETTIME_MASK(channel);
4098 		udma_mode = pci_conf_read(sc->sc_pc, sc->sc_tag, ATP860_UDMA);
4099 		udma_mode &= ~ATP860_UDMA_MASK(channel);
4100 	}
4101 
4102 	idedma_ctl = 0;
4103 
4104 	/* Per drive settings */
4105 	for (drive = 0; drive < 2; drive++) {
4106 		drvp = &chp->ch_drive[drive];
4107 		/* If no drive, skip */
4108 		if ((drvp->drive_flags & DRIVE) == 0)
4109 			continue;
4110 		/* add timing values, setup DMA if needed */
4111 		if ((chp->wdc->cap & WDC_CAPABILITY_UDMA) &&
4112 		    (drvp->drive_flags & DRIVE_UDMA)) {
4113 			/* use Ultra/DMA */
4114 			if (ACARD_IS_850(sc)) {
4115 				idetime |= ATP850_SETTIME(drive,
4116 				    acard_act_udma[drvp->UDMA_mode],
4117 				    acard_rec_udma[drvp->UDMA_mode]);
4118 				udma_mode |= ATP850_UDMA_MODE(channel, drive,
4119 				    acard_udma_conf[drvp->UDMA_mode]);
4120 			} else {
4121 				idetime |= ATP860_SETTIME(channel, drive,
4122 				    acard_act_udma[drvp->UDMA_mode],
4123 				    acard_rec_udma[drvp->UDMA_mode]);
4124 				udma_mode |= ATP860_UDMA_MODE(channel, drive,
4125 				    acard_udma_conf[drvp->UDMA_mode]);
4126 			}
4127 			idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
4128 		} else if ((chp->wdc->cap & WDC_CAPABILITY_DMA) &&
4129 		    (drvp->drive_flags & DRIVE_DMA)) {
4130 			/* use Multiword DMA */
4131 			drvp->drive_flags &= ~DRIVE_UDMA;
4132 			if (ACARD_IS_850(sc)) {
4133 				idetime |= ATP850_SETTIME(drive,
4134 				    acard_act_dma[drvp->DMA_mode],
4135 				    acard_rec_dma[drvp->DMA_mode]);
4136 			} else {
4137 				idetime |= ATP860_SETTIME(channel, drive,
4138 				    acard_act_dma[drvp->DMA_mode],
4139 				    acard_rec_dma[drvp->DMA_mode]);
4140 			}
4141 			idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
4142 		} else {
4143 			/* PIO only */
4144 			drvp->drive_flags &= ~(DRIVE_UDMA | DRIVE_DMA);
4145 			if (ACARD_IS_850(sc)) {
4146 				idetime |= ATP850_SETTIME(drive,
4147 				    acard_act_pio[drvp->PIO_mode],
4148 				    acard_rec_pio[drvp->PIO_mode]);
4149 			} else {
4150 				idetime |= ATP860_SETTIME(channel, drive,
4151 				    acard_act_pio[drvp->PIO_mode],
4152 				    acard_rec_pio[drvp->PIO_mode]);
4153 			}
4154 		pci_conf_write(sc->sc_pc, sc->sc_tag, ATP8x0_CTRL,
4155 		    pci_conf_read(sc->sc_pc, sc->sc_tag, ATP8x0_CTRL)
4156 		    | ATP8x0_CTRL_EN(channel));
4157 		}
4158 	}
4159 
4160 	if (idedma_ctl != 0) {
4161 		/* Add software bits in status register */
4162 		bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
4163 		    IDEDMA_CTL + IDEDMA_SCH_OFFSET * channel, idedma_ctl);
4164 	}
4165 	pciide_print_modes(cp);
4166 
4167 	if (ACARD_IS_850(sc)) {
4168 		pci_conf_write(sc->sc_pc, sc->sc_tag,
4169 		    ATP850_IDETIME(channel), idetime);
4170 		pci_conf_write(sc->sc_pc, sc->sc_tag, ATP850_UDMA, udma_mode);
4171 	} else {
4172 		pci_conf_write(sc->sc_pc, sc->sc_tag, ATP860_IDETIME, idetime);
4173 		pci_conf_write(sc->sc_pc, sc->sc_tag, ATP860_UDMA, udma_mode);
4174 	}
4175 }
4176 
4177 int
4178 acard_pci_intr(arg)
4179 	void *arg;
4180 {
4181 	struct pciide_softc *sc = arg;
4182 	struct pciide_channel *cp;
4183 	struct channel_softc *wdc_cp;
4184 	int rv = 0;
4185 	int dmastat, i, crv;
4186 
4187 	for (i = 0; i < sc->sc_wdcdev.nchannels; i++) {
4188 		dmastat = bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh,
4189 		    IDEDMA_CTL + IDEDMA_SCH_OFFSET * i);
4190 		if ((dmastat & IDEDMA_CTL_INTR) == 0)
4191 			continue;
4192 		cp = &sc->pciide_channels[i];
4193 		wdc_cp = &cp->wdc_channel;
4194 		if ((wdc_cp->ch_flags & WDCF_IRQ_WAIT) == 0) {
4195 			(void)wdcintr(wdc_cp);
4196 			bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
4197 			    IDEDMA_CTL + IDEDMA_SCH_OFFSET * i, dmastat);
4198 			continue;
4199 		}
4200 		crv = wdcintr(wdc_cp);
4201 		if (crv == 0)
4202 			printf("%s:%d: bogus intr\n",
4203 			    sc->sc_wdcdev.sc_dev.dv_xname, i);
4204 		else if (crv == 1)
4205 			rv = 1;
4206 		else if (rv == 0)
4207 			rv = crv;
4208 	}
4209 	return rv;
4210 }
4211