xref: /openbsd-src/sys/dev/pci/pciide.c (revision b2ea75c1b17e1a9a339660e7ed45cd24946b230e)
1 /*	$OpenBSD: pciide.c,v 1.61 2001/08/12 20:33:50 mickey Exp $	*/
2 /*	$NetBSD: pciide.c,v 1.127 2001/08/03 01:31:08 tsutsui Exp $	*/
3 
4 /*
5  * Copyright (c) 1999, 2000, 2001 Manuel Bouyer.
6  *
7  * Redistribution and use in source and binary forms, with or without
8  * modification, are permitted provided that the following conditions
9  * are met:
10  * 1. Redistributions of source code must retain the above copyright
11  *    notice, this list of conditions and the following disclaimer.
12  * 2. Redistributions in binary form must reproduce the above copyright
13  *    notice, this list of conditions and the following disclaimer in the
14  *    documentation and/or other materials provided with the distribution.
15  * 3. All advertising materials mentioning features or use of this software
16  *    must display the following acknowledgement:
17  *	This product includes software developed by the University of
18  *	California, Berkeley and its contributors.
19  * 4. Neither the name of the University nor the names of its contributors
20  *    may be used to endorse or promote products derived from this software
21  *    without specific prior written permission.
22  *
23  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
24  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
25  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
26  * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
27  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
28  * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
29  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
30  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
31  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
32  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
33  *
34  */
35 
36 /*
37  * Copyright (c) 1996, 1998 Christopher G. Demetriou.  All rights reserved.
38  *
39  * Redistribution and use in source and binary forms, with or without
40  * modification, are permitted provided that the following conditions
41  * are met:
42  * 1. Redistributions of source code must retain the above copyright
43  *    notice, this list of conditions and the following disclaimer.
44  * 2. Redistributions in binary form must reproduce the above copyright
45  *    notice, this list of conditions and the following disclaimer in the
46  *    documentation and/or other materials provided with the distribution.
47  * 3. All advertising materials mentioning features or use of this software
48  *    must display the following acknowledgement:
49  *      This product includes software developed by Christopher G. Demetriou
50  *	for the NetBSD Project.
51  * 4. The name of the author may not be used to endorse or promote products
52  *    derived from this software without specific prior written permission
53  *
54  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
55  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
56  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
57  * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
58  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
59  * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
60  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
61  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
62  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
63  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
64  */
65 
66 /*
67  * PCI IDE controller driver.
68  *
69  * Author: Christopher G. Demetriou, March 2, 1998 (derived from NetBSD
70  * sys/dev/pci/ppb.c, revision 1.16).
71  *
72  * See "PCI IDE Controller Specification, Revision 1.0 3/4/94" and
73  * "Programming Interface for Bus Master IDE Controller, Revision 1.0
74  * 5/16/94" from the PCI SIG.
75  *
76  */
77 
78 #define DEBUG_DMA   0x01
79 #define DEBUG_XFERS  0x02
80 #define DEBUG_FUNCS  0x08
81 #define DEBUG_PROBE  0x10
82 
83 #ifdef WDCDEBUG
84 int wdcdebug_pciide_mask = 0;
85 #define WDCDEBUG_PRINT(args, level) \
86 	if (wdcdebug_pciide_mask & (level)) printf args
87 #else
88 #define WDCDEBUG_PRINT(args, level)
89 #endif
90 #include <sys/param.h>
91 #include <sys/systm.h>
92 #include <sys/device.h>
93 #include <sys/malloc.h>
94 
95 #include <vm/vm.h>
96 #include <vm/vm_kern.h>
97 
98 #include <machine/endian.h>
99 
100 #include <dev/pci/pcireg.h>
101 #include <dev/pci/pcivar.h>
102 #include <dev/pci/pcidevs.h>
103 #include <dev/pci/pciidereg.h>
104 #include <dev/pci/pciidevar.h>
105 #include <dev/pci/pciide_piix_reg.h>
106 #include <dev/pci/pciide_amd_reg.h>
107 #include <dev/pci/pciide_apollo_reg.h>
108 #include <dev/pci/pciide_cmd_reg.h>
109 #include <dev/pci/pciide_cy693_reg.h>
110 #include <dev/pci/pciide_sis_reg.h>
111 #include <dev/pci/pciide_acer_reg.h>
112 #include <dev/pci/pciide_pdc202xx_reg.h>
113 #include <dev/pci/pciide_opti_reg.h>
114 #include <dev/pci/pciide_hpt_reg.h>
115 #include <dev/pci/pciide_acard_reg.h>
116 
117 #include <dev/pci/cy82c693var.h>
118 
119 #include <dev/ata/atavar.h>
120 #include <dev/ic/wdcreg.h>
121 #include <dev/ic/wdcvar.h>
122 
123 /* inlines for reading/writing 8-bit PCI registers */
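/*
 * pci_conf_read()/pci_conf_write() only operate on aligned 32-bit dwords,
 * so a byte-wide register is accessed by reading the dword that contains
 * it and then shifting/masking (read) or doing a read-modify-write (write)
 * on the byte at offset (reg & 0x03).
 */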
124 static __inline u_int8_t pciide_pci_read __P((pci_chipset_tag_t, pcitag_t,
125 					      int));
126 static __inline void pciide_pci_write __P((pci_chipset_tag_t, pcitag_t,
127 					   int, u_int8_t));
128 
129 static __inline u_int8_t
130 pciide_pci_read(pc, pa, reg)
131 	pci_chipset_tag_t pc;
132 	pcitag_t pa;
133 	int reg;
134 {
135 
136 	return (pci_conf_read(pc, pa, (reg & ~0x03)) >>
137 	    ((reg & 0x03) * 8) & 0xff);
138 }
139 
140 static __inline void
141 pciide_pci_write(pc, pa, reg, val)
142 	pci_chipset_tag_t pc;
143 	pcitag_t pa;
144 	int reg;
145 	u_int8_t val;
146 {
147 	pcireg_t pcival;
148 
149 	pcival = pci_conf_read(pc, pa, (reg & ~0x03));
150 	pcival &= ~(0xff << ((reg & 0x03) * 8));
151 	pcival |= (val << ((reg & 0x03) * 8));
152 	pci_conf_write(pc, pa, (reg & ~0x03), pcival);
153 }
154 
155 struct pciide_softc {
156 	struct wdc_softc	sc_wdcdev;	/* common wdc definitions */
157 	pci_chipset_tag_t	sc_pc;		/* PCI registers info */
158 	pcitag_t		sc_tag;
159 	void			*sc_pci_ih;	/* PCI interrupt handle */
160 	int			sc_dma_ok;	/* bus-master DMA info */
161 	bus_space_tag_t		sc_dma_iot;
162 	bus_space_handle_t	sc_dma_ioh;
163 	bus_dma_tag_t		sc_dmat;
164 
165 	/* For Cypress */
166 	const struct cy82c693_handle *sc_cy_handle;
167 	int sc_cy_compatchan;
168 
169 	/* Chip description */
170 	const struct pciide_product_desc *sc_pp;
171 	/* common definitions */
172 	struct channel_softc *wdc_chanarray[PCIIDE_NUM_CHANNELS];
173 	/* internal bookkeeping */
174 	struct pciide_channel {			/* per-channel data */
175 		struct channel_softc wdc_channel; /* generic part */
176 		char		*name;
177 		int		hw_ok;		/* hardware mapped & OK? */
178 		int		compat;		/* is it compat? */
179 		int             dma_in_progress;
180 		void		*ih;		/* compat or pci handle */
181 		bus_space_handle_t ctl_baseioh;	/* ctrl regs blk, native mode */
182 		/* DMA tables and DMA map for xfer, for each drive */
183 		struct pciide_dma_maps {
184 			bus_dmamap_t    dmamap_table;
185 			struct idedma_table *dma_table;
186 			bus_dmamap_t    dmamap_xfer;
187 			int dma_flags;
188 		} dma_maps[2];
189 	} pciide_channels[PCIIDE_NUM_CHANNELS];
190 };
191 
192 void default_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
193 
194 void piix_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
195 void piix_setup_channel __P((struct channel_softc*));
196 void piix3_4_setup_channel __P((struct channel_softc*));
197 
198 static u_int32_t piix_setup_idetim_timings __P((u_int8_t, u_int8_t, u_int8_t));
199 static u_int32_t piix_setup_idetim_drvs __P((struct ata_drive_datas*));
200 static u_int32_t piix_setup_sidetim_timings __P((u_int8_t, u_int8_t, u_int8_t));
201 
202 void amd756_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
203 void amd756_setup_channel __P((struct channel_softc*));
204 
205 void apollo_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
206 void apollo_setup_channel __P((struct channel_softc*));
207 
208 void cmd_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
209 void cmd0643_9_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
210 void cmd0643_9_setup_channel __P((struct channel_softc*));
211 void cmd_channel_map __P((struct pci_attach_args *,
212 			struct pciide_softc *, int));
213 int  cmd_pci_intr __P((void *));
214 void cmd646_9_irqack __P((struct channel_softc *));
215 
216 void cy693_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
217 void cy693_setup_channel __P((struct channel_softc*));
218 
219 void sis_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
220 void sis_setup_channel __P((struct channel_softc*));
221 
222 void acer_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
223 void acer_setup_channel __P((struct channel_softc*));
224 int  acer_pci_intr __P((void *));
225 
226 void pdc202xx_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
227 void pdc202xx_setup_channel __P((struct channel_softc*));
228 int  pdc202xx_pci_intr __P((void *));
229 int  pdc20265_pci_intr __P((void *));
230 
231 void opti_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
232 void opti_setup_channel __P((struct channel_softc*));
233 
234 void hpt_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
235 void hpt_setup_channel __P((struct channel_softc*));
236 int  hpt_pci_intr __P((void *));
237 
238 void acard_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
239 void acard_setup_channel __P((struct channel_softc*));
240 int  acard_pci_intr __P((void *));
241 
242 void pciide_channel_dma_setup __P((struct pciide_channel *));
243 int  pciide_dma_table_setup __P((struct pciide_softc*, int, int));
244 int  pciide_dma_init __P((void*, int, int, void *, size_t, int));
245 void pciide_dma_start __P((void*, int, int));
246 int  pciide_dma_finish __P((void*, int, int));
247 void pciide_irqack __P((struct channel_softc *));
248 void pciide_print_modes __P((struct pciide_channel *));
249 void pciide_print_channels __P((int, pcireg_t));
250 
251 struct pciide_product_desc {
252 	u_int32_t ide_product;
253 	u_short ide_flags;
254 	/* map and setup chip, probe drives */
255 	void (*chip_map) __P((struct pciide_softc*, struct pci_attach_args*));
256 };
257 
258 /* Flags for ide_flags */
259 #define IDE_PCI_CLASS_OVERRIDE	0x0001	/* accept even if class != pciide */
260 #define IDE_16BIT_IOSPACE	0x0002	/* I/O space BARS ignore upper word */
261 
262 /* Default product description for devices not known from this controller */
263 const struct pciide_product_desc default_product_desc = {
264 	0,				/* Generic PCI IDE controller */
265 	0,
266 	default_chip_map
267 };
268 
269 const struct pciide_product_desc pciide_intel_products[] =  {
270 	{ PCI_PRODUCT_INTEL_82092AA,	/* Intel 82092AA IDE */
271 	  0,
272 	  default_chip_map
273 	},
274 	{ PCI_PRODUCT_INTEL_82371FB_IDE, /* Intel 82371FB IDE (PIIX) */
275 	  0,
276 	  piix_chip_map
277 	},
278 	{ PCI_PRODUCT_INTEL_82371SB_IDE, /* Intel 82371SB IDE (PIIX3) */
279 	  0,
280 	  piix_chip_map
281 	},
282 	{ PCI_PRODUCT_INTEL_82371AB_IDE, /* Intel 82371AB IDE (PIIX4) */
283 	  0,
284 	  piix_chip_map
285 	},
286 	{ PCI_PRODUCT_INTEL_82440MX_IDE, /* Intel 82440MX IDE */
287 	  0,
288 	  piix_chip_map
289 	},
290 	{ PCI_PRODUCT_INTEL_82801AA_IDE, /* Intel 82801AA IDE (ICH) */
291 	  0,
292 	  piix_chip_map
293 	},
294 	{ PCI_PRODUCT_INTEL_82801AB_IDE, /* Intel 82801AB IDE (ICH0) */
295 	  0,
296 	  piix_chip_map
297 	},
298 	{ PCI_PRODUCT_INTEL_82801BAM_IDE, /* Intel 82801BAM IDE (ICH2) */
299 	  0,
300 	  piix_chip_map
301 	},
302 	{ PCI_PRODUCT_INTEL_82801BA_IDE, /* Intel 82801BA IDE (ICH2) */
303 	  0,
304 	  piix_chip_map
305 	},
306 };
307 
308 const struct pciide_product_desc pciide_amd_products[] =  {
309 	{ PCI_PRODUCT_AMD_PBC756_IDE,	/* AMD 756 */
310 	  0,
311 	  amd756_chip_map
312 	},
313 	{ PCI_PRODUCT_AMD_766_IDE, /* AMD 766 */
314 	  0,
315 	  amd756_chip_map
316 	},
317 };
318 
319 #ifdef notyet
320 const struct pciide_product_desc pciide_opti_products[] = {
321 
322 	{ PCI_PRODUCT_OPTI_82C621,
323 	  0,
324 	  opti_chip_map
325 	},
326 	{ PCI_PRODUCT_OPTI_82C568,
327 	  0,
328 	  opti_chip_map
329 	},
330 	{ PCI_PRODUCT_OPTI_82D568,
331 	  0,
332 	  opti_chip_map
333 	},
334 };
335 #endif
336 
337 const struct pciide_product_desc pciide_cmd_products[] =  {
338 	{ PCI_PRODUCT_CMDTECH_640,	/* CMD Technology PCI0640 */
339 	  0,
340 	  cmd_chip_map
341 	},
342 	{ PCI_PRODUCT_CMDTECH_643,	/* CMD Technology PCI0643 */
343 	  0,
344 	  cmd0643_9_chip_map
345 	},
346 	{ PCI_PRODUCT_CMDTECH_646,	/* CMD Technology PCI0646 */
347 	  0,
348 	  cmd0643_9_chip_map
349 	},
350 	{ PCI_PRODUCT_CMDTECH_648,	/* CMD Technology PCI0648 */
351 	  IDE_PCI_CLASS_OVERRIDE,
352 	  cmd0643_9_chip_map
353 	},
354 	{ PCI_PRODUCT_CMDTECH_649,	/* CMD Technology PCI0649 */
355 	  IDE_PCI_CLASS_OVERRIDE,
356 	  cmd0643_9_chip_map
357 	}
358 };
359 
360 const struct pciide_product_desc pciide_via_products[] =  {
361 	{ PCI_PRODUCT_VIATECH_VT82C416, /* VIA VT82C416 IDE */
362 	  0,
363 	  apollo_chip_map
364 	 },
365 	{ PCI_PRODUCT_VIATECH_VT82C571, /* VIA VT82C571 IDE */
366 	  0,
367 	  apollo_chip_map
368 	}
369 };
370 
371 const struct pciide_product_desc pciide_cypress_products[] =  {
372 	{ PCI_PRODUCT_CONTAQ_82C693,	/* Contaq CY82C693 IDE */
373 	  IDE_16BIT_IOSPACE,
374 	  cy693_chip_map
375 	}
376 };
377 
378 const struct pciide_product_desc pciide_sis_products[] =  {
379 	{ PCI_PRODUCT_SIS_5513,		/* SIS 5513 EIDE */
380 	  0,
381 	  sis_chip_map
382 	}
383 };
384 
385 const struct pciide_product_desc pciide_acer_products[] =  {
386 	{ PCI_PRODUCT_ALI_M5229,	/* Acer Labs M5229 UDMA IDE */
387 	  0,
388 	  acer_chip_map
389 	}
390 };
391 
392 const struct pciide_product_desc pciide_triones_products[] =  {
393 	{ PCI_PRODUCT_TRIONES_HPT366,	/* Highpoint HPT36x/37x IDE */
394 	  IDE_PCI_CLASS_OVERRIDE,
395 	  hpt_chip_map,
396 	}
397 };
398 
399 const struct pciide_product_desc pciide_promise_products[] =  {
400 	{ PCI_PRODUCT_PROMISE_PDC20246,
401 	IDE_PCI_CLASS_OVERRIDE,
402 	pdc202xx_chip_map,
403 	},
404 	{ PCI_PRODUCT_PROMISE_PDC20262,
405 	IDE_PCI_CLASS_OVERRIDE,
406 	pdc202xx_chip_map,
407 	},
408 	{ PCI_PRODUCT_PROMISE_PDC20265,
409 	IDE_PCI_CLASS_OVERRIDE,
410 	pdc202xx_chip_map,
411 	},
412 	{ PCI_PRODUCT_PROMISE_PDC20267,
413 	IDE_PCI_CLASS_OVERRIDE,
414 	pdc202xx_chip_map,
415 	}
416 };
417 
418 const struct pciide_product_desc pciide_acard_products[] =  {
419 	{ PCI_PRODUCT_ACARD_ATP850U,	/* Acard ATP850U Ultra33 Controller */
420 	 IDE_PCI_CLASS_OVERRIDE,
421 	 acard_chip_map,
422 	 },
423 	{ PCI_PRODUCT_ACARD_ATP860,	/* Acard ATP860 Ultra66 Controller */
424 	 IDE_PCI_CLASS_OVERRIDE,
425 	 acard_chip_map,
426 	},
427 	{ PCI_PRODUCT_ACARD_ATP860A,	/* Acard ATP860-A Ultra66 Controller */
428 	 IDE_PCI_CLASS_OVERRIDE,
429 	 acard_chip_map,
430 	}
431 };
432 
433 struct pciide_vendor_desc {
434 	u_int32_t ide_vendor;
435 	const struct pciide_product_desc *ide_products;
436 	int ide_nproducts;
437 };
438 
439 const struct pciide_vendor_desc pciide_vendors[] = {
440 	{ PCI_VENDOR_INTEL, pciide_intel_products,
441 	  sizeof(pciide_intel_products)/sizeof(pciide_intel_products[0]) },
442 	{ PCI_VENDOR_AMD, pciide_amd_products,
443 	  sizeof(pciide_amd_products)/sizeof(pciide_amd_products[0]) },
444 #ifdef notyet
445 	{ PCI_VENDOR_OPTI, pciide_opti_products,
446 	  sizeof(pciide_opti_products)/sizeof(pciide_opti_products[0]) },
447 #endif
448 	{ PCI_VENDOR_CMDTECH, pciide_cmd_products,
449 	  sizeof(pciide_cmd_products)/sizeof(pciide_cmd_products[0]) },
450 	{ PCI_VENDOR_VIATECH, pciide_via_products,
451 	  sizeof(pciide_via_products)/sizeof(pciide_via_products[0]) },
452 	{ PCI_VENDOR_CONTAQ, pciide_cypress_products,
453 	  sizeof(pciide_cypress_products)/sizeof(pciide_cypress_products[0]) },
454 	{ PCI_VENDOR_SIS, pciide_sis_products,
455 	  sizeof(pciide_sis_products)/sizeof(pciide_sis_products[0]) },
456 	{ PCI_VENDOR_ALI, pciide_acer_products,
457 	  sizeof(pciide_acer_products)/sizeof(pciide_acer_products[0]) },
458 	{ PCI_VENDOR_TRIONES, pciide_triones_products,
459 	  sizeof(pciide_triones_products)/sizeof(pciide_triones_products[0]) },
460 	{ PCI_VENDOR_ACARD, pciide_acard_products,
461 	  sizeof(pciide_acard_products)/sizeof(pciide_acard_products[0]) },
462 	{ PCI_VENDOR_PROMISE, pciide_promise_products,
463 	  sizeof(pciide_promise_products)/sizeof(pciide_promise_products[0]) }
464 };
465 
466 /* options passed via the 'flags' config keyword */
467 #define PCIIDE_OPTIONS_DMA	0x01
468 
469 #ifndef __OpenBSD__
470 int	pciide_match __P((struct device *, struct cfdata *, void *));
471 #else
472 int	pciide_match __P((struct device *, void *, void *));
473 #endif
474 void	pciide_attach __P((struct device *, struct device *, void *));
475 
476 struct cfattach pciide_ca = {
477 	sizeof(struct pciide_softc), pciide_match, pciide_attach
478 };
479 
480 #ifdef __OpenBSD__
481 struct cfdriver pciide_cd = {
482 	NULL, "pciide", DV_DULL
483 };
484 #endif
485 int	pciide_chipen __P((struct pciide_softc *, struct pci_attach_args *));
486 int	pciide_mapregs_compat __P(( struct pci_attach_args *,
487 	    struct pciide_channel *, int, bus_size_t *, bus_size_t*));
488 int	pciide_mapregs_native __P((struct pci_attach_args *,
489 	    struct pciide_channel *, bus_size_t *, bus_size_t *,
490 	    int (*pci_intr) __P((void *))));
491 void	pciide_mapreg_dma __P((struct pciide_softc *,
492 	    struct pci_attach_args *));
493 int	pciide_chansetup __P((struct pciide_softc *, int, pcireg_t));
494 void	pciide_mapchan __P((struct pci_attach_args *,
495 	    struct pciide_channel *, pcireg_t, bus_size_t *, bus_size_t *,
496 	    int (*pci_intr) __P((void *))));
497 int	pciide_chan_candisable __P((struct pciide_channel *));
498 void	pciide_map_compat_intr __P(( struct pci_attach_args *,
499 	    struct pciide_channel *, int, int));
500 void	pciide_unmap_compat_intr __P(( struct pci_attach_args *,
501 	    struct pciide_channel *, int, int));
502 int	pciide_compat_intr __P((void *));
503 int	pciide_pci_intr __P((void *));
504 int	pciide_intr_flag __P((struct pciide_channel *));
505 
506 const struct pciide_product_desc* pciide_lookup_product __P((u_int32_t));
507 
508 const struct pciide_product_desc *
509 pciide_lookup_product(id)
510 	u_int32_t id;
511 {
512 	const struct pciide_product_desc *pp;
513 	const struct pciide_vendor_desc *vp;
514 	int i;
515 
516 	for (i = 0, vp = pciide_vendors;
517 	    i < sizeof(pciide_vendors)/sizeof(pciide_vendors[0]);
518 	    vp++, i++)
519 		if (PCI_VENDOR(id) == vp->ide_vendor)
520 			break;
521 
522 	if (i == sizeof(pciide_vendors)/sizeof(pciide_vendors[0]))
523 		return NULL;
524 
525 	for (pp = vp->ide_products, i = 0; i < vp->ide_nproducts; pp++, i++)
526 		if (PCI_PRODUCT(id) == pp->ide_product)
527 			break;
528 
529 	if (i == vp->ide_nproducts)
530 		return NULL;
531 	return pp;
532 }
533 
534 int
535 pciide_match(parent, match, aux)
536 	struct device *parent;
537 #ifdef __OpenBSD__
538 	void *match;
539 #else
540 	struct cfdata *match;
541 #endif
542 	void *aux;
543 {
544 	struct pci_attach_args *pa = aux;
545 	const struct pciide_product_desc *pp;
546 
547 	/*
548 	 * Some IDE controllers have severe bugs when used in PCI mode.
549 	 * We punt and attach them to the ISA bus instead.
550 	 */
551 	if (PCI_VENDOR(pa->pa_id) == PCI_VENDOR_PCTECH &&
552 	    PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_PCTECH_RZ1000)
553 		return (0);
554 
555 	/*
556 	 * Check the ID register to see that it's a PCI IDE controller.
557 	 * If it is, we assume that we can deal with it; it _should_
558 	 * work in a standardized way...
559 	 */
560 	if (PCI_CLASS(pa->pa_class) == PCI_CLASS_MASS_STORAGE &&
561 	    PCI_SUBCLASS(pa->pa_class) == PCI_SUBCLASS_MASS_STORAGE_IDE) {
562 		return (1);
563 	}
564 
565 	/*
566 	 * Some controllers (e.g. Promise Ultra-33) don't claim to be PCI IDE
567 	 * controllers. Let's see if we can deal with them anyway.
568 	 */
569 	pp = pciide_lookup_product(pa->pa_id);
570 	if (pp && (pp->ide_flags & IDE_PCI_CLASS_OVERRIDE)) {
571 		return (1);
572 	}
573 
574 	return (0);
575 }
576 
577 void
578 pciide_attach(parent, self, aux)
579 	struct device *parent, *self;
580 	void *aux;
581 {
582 	struct pci_attach_args *pa = aux;
583 	pci_chipset_tag_t pc = pa->pa_pc;
584 	pcitag_t tag = pa->pa_tag;
585 	struct pciide_softc *sc = (struct pciide_softc *)self;
586 	pcireg_t csr;
587 	char devinfo[256];
588 
589 	sc->sc_pp = pciide_lookup_product(pa->pa_id);
590 	if (sc->sc_pp == NULL) {
591 		sc->sc_pp = &default_product_desc;
592 		pci_devinfo(pa->pa_id, pa->pa_class, 0, devinfo);
593 	}
594 
595 	sc->sc_pc = pa->pa_pc;
596 	sc->sc_tag = pa->pa_tag;
597 
598 #ifdef WDCDEBUG
599 	if (wdcdebug_pciide_mask & DEBUG_PROBE)
600 		printf(" sc_pc %p, sc_tag %p\n", sc->sc_pc, sc->sc_tag);
601 #endif
602 
603 	sc->sc_pp->chip_map(sc, pa);
604 
605 	if (sc->sc_dma_ok) {
606 		csr = pci_conf_read(pc, tag, PCI_COMMAND_STATUS_REG);
607 		csr |= PCI_COMMAND_MASTER_ENABLE;
608 		pci_conf_write(pc, tag, PCI_COMMAND_STATUS_REG, csr);
609 	}
610 
611 	WDCDEBUG_PRINT(("pciide: command/status register=%x\n",
612 	    pci_conf_read(pc, tag, PCI_COMMAND_STATUS_REG)), DEBUG_PROBE);
613 }
614 
615 /* tell whether the chip is enabled or not */
616 int
617 pciide_chipen(sc, pa)
618 	struct pciide_softc *sc;
619 	struct pci_attach_args *pa;
620 {
621 	pcireg_t csr;
622 	if ((pa->pa_flags & PCI_FLAGS_IO_ENABLED) == 0) {
623 		csr = pci_conf_read(sc->sc_pc, sc->sc_tag,
624 		    PCI_COMMAND_STATUS_REG);
625 		printf("%s: device disabled (at %s)\n",
626 		    sc->sc_wdcdev.sc_dev.dv_xname,
627 		    (csr & PCI_COMMAND_IO_ENABLE) == 0 ?
628 		    "device" : "bridge");
629 		return 0;
630 	}
631 	return 1;
632 }
633 
634 int
635 pciide_mapregs_compat(pa, cp, compatchan, cmdsizep, ctlsizep)
636 	struct pci_attach_args *pa;
637 	struct pciide_channel *cp;
638 	int compatchan;
639 	bus_size_t *cmdsizep, *ctlsizep;
640 {
641 	struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
642 	struct channel_softc *wdc_cp = &cp->wdc_channel;
643 
644 	cp->compat = 1;
645 	*cmdsizep = PCIIDE_COMPAT_CMD_SIZE;
646 	*ctlsizep = PCIIDE_COMPAT_CTL_SIZE;
647 
648 	wdc_cp->cmd_iot = pa->pa_iot;
649 
650 	if (bus_space_map(wdc_cp->cmd_iot, PCIIDE_COMPAT_CMD_BASE(compatchan),
651 	    PCIIDE_COMPAT_CMD_SIZE, 0, &wdc_cp->cmd_ioh) != 0) {
652 		printf("%s: couldn't map %s cmd regs\n",
653 		    sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
654 		return (0);
655 	}
656 
657 	wdc_cp->ctl_iot = pa->pa_iot;
658 
659 	if (bus_space_map(wdc_cp->ctl_iot, PCIIDE_COMPAT_CTL_BASE(compatchan),
660 	    PCIIDE_COMPAT_CTL_SIZE, 0, &wdc_cp->ctl_ioh) != 0) {
661 		printf("%s: couldn't map %s ctl regs\n",
662 		    sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
663 		bus_space_unmap(wdc_cp->cmd_iot, wdc_cp->cmd_ioh,
664 		    PCIIDE_COMPAT_CMD_SIZE);
665 		return (0);
666 	}
667 
668 	return (1);
669 }
670 
671 int
672 pciide_mapregs_native(pa, cp, cmdsizep, ctlsizep, pci_intr)
673 	struct pci_attach_args * pa;
674 	struct pciide_channel *cp;
675 	bus_size_t *cmdsizep, *ctlsizep;
676 	int (*pci_intr) __P((void *));
677 {
678 	struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
679 	struct channel_softc *wdc_cp = &cp->wdc_channel;
680 	const char *intrstr;
681 	pci_intr_handle_t intrhandle;
682 
683 	cp->compat = 0;
684 
685 	if (sc->sc_pci_ih == NULL) {
686 		if (pci_intr_map(pa->pa_pc, pa->pa_intrtag, pa->pa_intrpin,
687 		    pa->pa_intrline, &intrhandle) != 0) {
688 			printf("%s: couldn't map native-PCI interrupt\n",
689 			    sc->sc_wdcdev.sc_dev.dv_xname);
690 			return 0;
691 		}
692 		intrstr = pci_intr_string(pa->pa_pc, intrhandle);
693 #ifdef __OpenBSD__
694 		sc->sc_pci_ih = pci_intr_establish(pa->pa_pc,
695 		    intrhandle, IPL_BIO, pci_intr, sc,
696 		    sc->sc_wdcdev.sc_dev.dv_xname);
697 #else
698 		sc->sc_pci_ih = pci_intr_establish(pa->pa_pc,
699 		    intrhandle, IPL_BIO, pci_intr, sc);
700 #endif
701 		if (sc->sc_pci_ih != NULL) {
702 			printf("%s: using %s for native-PCI interrupt\n",
703 			    sc->sc_wdcdev.sc_dev.dv_xname,
704 			    intrstr ? intrstr : "unknown interrupt");
705 		} else {
706 			printf("%s: couldn't establish native-PCI interrupt",
707 			    sc->sc_wdcdev.sc_dev.dv_xname);
708 			if (intrstr != NULL)
709 				printf(" at %s", intrstr);
710 			printf("\n");
711 			return 0;
712 		}
713 	}
714 	cp->ih = sc->sc_pci_ih;
715 	if (pci_mapreg_map(pa, PCIIDE_REG_CMD_BASE(wdc_cp->channel),
716 	    PCI_MAPREG_TYPE_IO, 0,
717 	    &wdc_cp->cmd_iot, &wdc_cp->cmd_ioh, NULL, cmdsizep, 0) != 0) {
718 		printf("%s: couldn't map %s cmd regs\n",
719 		    sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
720 		return 0;
721 	}
722 
723 	if (pci_mapreg_map(pa, PCIIDE_REG_CTL_BASE(wdc_cp->channel),
724 	    PCI_MAPREG_TYPE_IO, 0,
725 	    &wdc_cp->ctl_iot, &cp->ctl_baseioh, NULL, ctlsizep, 0) != 0) {
726 		printf("%s: couldn't map %s ctl regs\n",
727 		    sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
728 		bus_space_unmap(wdc_cp->cmd_iot, wdc_cp->cmd_ioh, *cmdsizep);
729 		return 0;
730 	}
731 	/*
732 	 * In native mode, 4 bytes of I/O space are mapped for the control
733 	 * register, the control register is at offset 2. Pass the generic
734 	 * code a handle for only one byte at the right offset.
735 	 */
736 	if (bus_space_subregion(wdc_cp->ctl_iot, cp->ctl_baseioh, 2, 1,
737 	    &wdc_cp->ctl_ioh) != 0) {
738 		printf("%s: unable to subregion %s channel ctl regs\n",
739 		    sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
740 		bus_space_unmap(wdc_cp->cmd_iot, wdc_cp->cmd_ioh, *cmdsizep);
741 		bus_space_unmap(wdc_cp->cmd_iot, cp->ctl_baseioh, *ctlsizep);
742 		return 0;
743 	}
744 	return (1);
745 }
746 
747 void
748 pciide_mapreg_dma(sc, pa)
749 	struct pciide_softc *sc;
750 	struct pci_attach_args *pa;
751 {
752 	pcireg_t maptype;
753 	bus_addr_t addr;
754 
755 	/*
756 	 * Map DMA registers
757 	 *
758 	 * Note that sc_dma_ok is the right variable to test to see if
759 	 * DMA can be done.  If the interface doesn't support DMA,
760 	 * sc_dma_ok will never be non-zero.  If the DMA regs couldn't
761 	 * be mapped, it'll be zero.  I.e., sc_dma_ok will only be
762 	 * non-zero if the interface supports DMA and the registers
763 	 * could be mapped.
764 	 *
765 	 * XXX Note that despite the fact that the Bus Master IDE specs
766 	 * XXX say that "The bus master IDE function uses 16 bytes of IO
767 	 * XXX space," some controllers (at least the United
768 	 * XXX Microelectronics UM8886BF) place it in memory space.
769 	 */
770 
771 	maptype = pci_mapreg_type(pa->pa_pc, pa->pa_tag,
772 	    PCIIDE_REG_BUS_MASTER_DMA);
773 
774 	switch (maptype) {
775 	case PCI_MAPREG_TYPE_IO:
776 		sc->sc_dma_ok = (pci_mapreg_info(pa->pa_pc, pa->pa_tag,
777 		    PCIIDE_REG_BUS_MASTER_DMA, PCI_MAPREG_TYPE_IO,
778 		    &addr, NULL, NULL) == 0);
779 		if (sc->sc_dma_ok == 0) {
780 			printf(", unused (couldn't query registers)");
781 			break;
782 		}
783 		if ((sc->sc_pp->ide_flags & IDE_16BIT_IOSPACE)
784 		    && addr >= 0x10000) {
785 			sc->sc_dma_ok = 0;
786 			printf(", unused (registers at unsafe address %#lx)", addr);
787 			break;
788 		}
789 		/* FALLTHROUGH */
790 
791 	case PCI_MAPREG_MEM_TYPE_32BIT:
792 		sc->sc_dma_ok = (pci_mapreg_map(pa,
793 		    PCIIDE_REG_BUS_MASTER_DMA, maptype, 0,
794 		    &sc->sc_dma_iot, &sc->sc_dma_ioh, NULL, NULL, 0) == 0);
795 		sc->sc_dmat = pa->pa_dmat;
796 		if (sc->sc_dma_ok == 0) {
797 			printf(", unused (couldn't map registers)");
798 		} else {
799 			sc->sc_wdcdev.dma_arg = sc;
800 			sc->sc_wdcdev.dma_init = pciide_dma_init;
801 			sc->sc_wdcdev.dma_start = pciide_dma_start;
802 			sc->sc_wdcdev.dma_finish = pciide_dma_finish;
803 		}
804 		break;
805 
806 	default:
807 		sc->sc_dma_ok = 0;
808 		printf(", (unsupported maptype 0x%x)", maptype);
809 		break;
810 	}
811 }
812 
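/*
 * Check whether a (possibly shared) interrupt belongs to this channel by
 * looking at the bus-master status register: returns 1 if the interrupt
 * bit is set, 0 if it is not (the interrupt is not for us), and -1 when no
 * DMA transfer is in progress so the question cannot be answered.
 */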
813 int
814 pciide_intr_flag(struct pciide_channel *cp)
815 {
816 	struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
817 
818 	if (cp->dma_in_progress) {
819 		int retry = 10;
820 		int status;
821 
822 		/* Check the status register */
823 		for (retry = 10; retry > 0; retry--) {
824 			status = bus_space_read_1(sc->sc_dma_iot,
825 			    sc->sc_dma_ioh,
826 			    IDEDMA_CTL + IDEDMA_SCH_OFFSET *
827 			    cp->wdc_channel.channel);
828 			if (status & IDEDMA_CTL_INTR) {
829 				break;
830 			}
831 			DELAY(5);
832 		}
833 
834 		/* Not for us.  */
835 		if (retry == 0)
836 			return (0);
837 
838 		return (1);
839 	}
840 
841 	return (-1);
842 }
843 
844 int
845 pciide_compat_intr(arg)
846 	void *arg;
847 {
848 	struct pciide_channel *cp = arg;
849 
850 	if (pciide_intr_flag(cp) == 0)
851 		return 0;
852 
853 #ifdef DIAGNOSTIC
854 	/* should only be called for a compat channel */
855 	if (cp->compat == 0)
856 		panic("pciide compat intr called for non-compat chan %p\n", cp);
857 #endif
858 	return (wdcintr(&cp->wdc_channel));
859 }
860 
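/*
 * Native-PCI interrupt handler, possibly shared by both channels: poll
 * each non-compat channel that is waiting for an interrupt and hand it
 * to wdcintr().
 */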
861 int
862 pciide_pci_intr(arg)
863 	void *arg;
864 {
865 	struct pciide_softc *sc = arg;
866 	struct pciide_channel *cp;
867 	struct channel_softc *wdc_cp;
868 	int i, rv, crv;
869 
870 	rv = 0;
871 	for (i = 0; i < sc->sc_wdcdev.nchannels; i++) {
872 		cp = &sc->pciide_channels[i];
873 		wdc_cp = &cp->wdc_channel;
874 
875 		/* If a compat channel, skip. */
876 		if (cp->compat)
877 			continue;
878 		/* if this channel is not waiting for an intr, skip */
879 		if ((wdc_cp->ch_flags & WDCF_IRQ_WAIT) == 0)
880 			continue;
881 
882 		if (pciide_intr_flag(cp) == 0)
883 			continue;
884 
885 		crv = wdcintr(wdc_cp);
886 		if (crv == 0)
887 			;		/* leave rv alone */
888 		else if (crv == 1)
889 			rv = 1;		/* claim the intr */
890 		else if (rv == 0)	/* crv should be -1 in this case */
891 			rv = crv;	/* if we've done no better, take it */
892 	}
893 	return (rv);
894 }
895 
896 void
897 pciide_channel_dma_setup(cp)
898 	struct pciide_channel *cp;
899 {
900 	int drive;
901 	struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
902 	struct ata_drive_datas *drvp;
903 
904 	for (drive = 0; drive < 2; drive++) {
905 		drvp = &cp->wdc_channel.ch_drive[drive];
906 		/* If no drive, skip */
907 		if ((drvp->drive_flags & DRIVE) == 0)
908 			continue;
909 		/* setup DMA if needed */
910 		if (((drvp->drive_flags & DRIVE_DMA) == 0 &&
911 		    (drvp->drive_flags & DRIVE_UDMA) == 0) ||
912 		    sc->sc_dma_ok == 0) {
913 			drvp->drive_flags &= ~(DRIVE_DMA | DRIVE_UDMA);
914 			continue;
915 		}
916 		if (pciide_dma_table_setup(sc, cp->wdc_channel.channel, drive)
917 		    != 0) {
918 			/* Abort DMA setup */
919 			drvp->drive_flags &= ~(DRIVE_DMA | DRIVE_UDMA);
920 			continue;
921 		}
922 	}
923 }
924 
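/*
 * Allocate and map the physical region descriptor (PRD) table for one
 * drive, and create the DMA maps for the table itself and for data
 * transfers.  Called once per drive; subsequent calls return early.
 */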
925 int
926 pciide_dma_table_setup(sc, channel, drive)
927 	struct pciide_softc *sc;
928 	int channel, drive;
929 {
930 	bus_dma_segment_t seg;
931 	int error, rseg;
932 	const bus_size_t dma_table_size =
933 	    sizeof(struct idedma_table) * NIDEDMA_TABLES;
934 	struct pciide_dma_maps *dma_maps =
935 	    &sc->pciide_channels[channel].dma_maps[drive];
936 
937 	/* If table was already allocated, just return */
938 	if (dma_maps->dma_table)
939 		return 0;
940 
941 	/* Allocate memory for the DMA tables and map it */
942 	if ((error = bus_dmamem_alloc(sc->sc_dmat, dma_table_size,
943 	    IDEDMA_TBL_ALIGN, IDEDMA_TBL_ALIGN, &seg, 1, &rseg,
944 	    BUS_DMA_NOWAIT)) != 0) {
945 		printf("%s:%d: unable to allocate table DMA for "
946 		    "drive %d, error=%d\n", sc->sc_wdcdev.sc_dev.dv_xname,
947 		    channel, drive, error);
948 		return error;
949 	}
950 
951 	if ((error = bus_dmamem_map(sc->sc_dmat, &seg, rseg,
952 	    dma_table_size,
953 	    (caddr_t *)&dma_maps->dma_table,
954 	    BUS_DMA_NOWAIT|BUS_DMA_COHERENT)) != 0) {
955 		printf("%s:%d: unable to map table DMA for "
956 		    "drive %d, error=%d\n", sc->sc_wdcdev.sc_dev.dv_xname,
957 		    channel, drive, error);
958 		return error;
959 	}
960 
961 	WDCDEBUG_PRINT(("pciide_dma_table_setup: table at %p len %ld, "
962 	    "phy 0x%lx\n", dma_maps->dma_table, dma_table_size,
963 	    seg.ds_addr), DEBUG_PROBE);
964 
965 	/* Create and load table DMA map for this disk */
966 	if ((error = bus_dmamap_create(sc->sc_dmat, dma_table_size,
967 	    1, dma_table_size, IDEDMA_TBL_ALIGN, BUS_DMA_NOWAIT,
968 	    &dma_maps->dmamap_table)) != 0) {
969 		printf("%s:%d: unable to create table DMA map for "
970 		    "drive %d, error=%d\n", sc->sc_wdcdev.sc_dev.dv_xname,
971 		    channel, drive, error);
972 		return error;
973 	}
974 	if ((error = bus_dmamap_load(sc->sc_dmat,
975 	    dma_maps->dmamap_table,
976 	    dma_maps->dma_table,
977 	    dma_table_size, NULL, BUS_DMA_NOWAIT)) != 0) {
978 		printf("%s:%d: unable to load table DMA map for "
979 		    "drive %d, error=%d\n", sc->sc_wdcdev.sc_dev.dv_xname,
980 		    channel, drive, error);
981 		return error;
982 	}
983 	WDCDEBUG_PRINT(("pciide_dma_table_setup: phy addr of table 0x%lx\n",
984 	    dma_maps->dmamap_table->dm_segs[0].ds_addr), DEBUG_PROBE);
985 	/* Create a xfer DMA map for this drive */
986 	if ((error = bus_dmamap_create(sc->sc_dmat, IDEDMA_BYTE_COUNT_MAX,
987 	    NIDEDMA_TABLES, IDEDMA_BYTE_COUNT_MAX, IDEDMA_BYTE_COUNT_ALIGN,
988 	    BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW,
989 	    &dma_maps->dmamap_xfer)) != 0) {
990 		printf("%s:%d: unable to create xfer DMA map for "
991 		    "drive %d, error=%d\n", sc->sc_wdcdev.sc_dev.dv_xname,
992 		    channel, drive, error);
993 		return error;
994 	}
995 	return 0;
996 }
997 
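/*
 * Prepare a DMA transfer: load the data buffer into the xfer DMA map,
 * fill in the descriptor table from the resulting segments and program
 * the bus-master registers (table address, direction).  The transfer
 * itself is started later by pciide_dma_start().
 */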
998 int
999 pciide_dma_init(v, channel, drive, databuf, datalen, flags)
1000 	void *v;
1001 	int channel, drive;
1002 	void *databuf;
1003 	size_t datalen;
1004 	int flags;
1005 {
1006 	struct pciide_softc *sc = v;
1007 	int error, seg;
1008 	struct pciide_dma_maps *dma_maps =
1009 	    &sc->pciide_channels[channel].dma_maps[drive];
1010 
1011 	error = bus_dmamap_load(sc->sc_dmat,
1012 	    dma_maps->dmamap_xfer,
1013 	    databuf, datalen, NULL, BUS_DMA_NOWAIT);
1014 	if (error) {
1015 		printf("%s:%d: unable to load xfer DMA map for "
1016 		    "drive %d, error=%d\n", sc->sc_wdcdev.sc_dev.dv_xname,
1017 		    channel, drive, error);
1018 		return error;
1019 	}
1020 
1021 #ifndef __OpenBSD__
1022 	bus_dmamap_sync(sc->sc_dmat, dma_maps->dmamap_xfer,
1023 	    0,
1024 	    dma_maps->dmamap_xfer->dm_mapsize,
1025 	    (flags & WDC_DMA_READ) ?
1026 	    BUS_DMASYNC_PREREAD : BUS_DMASYNC_PREWRITE);
1027 #else
1028 	bus_dmamap_sync(sc->sc_dmat, dma_maps->dmamap_xfer,
1029 	    (flags & WDC_DMA_READ) ?
1030 	    BUS_DMASYNC_PREREAD : BUS_DMASYNC_PREWRITE);
1031 #endif
1032 
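	/*
	 * Fill in one descriptor per DMA segment: the 32-bit physical base
	 * address and the byte count, both little-endian.  The EOT bit in
	 * the last entry's byte_count marks the end of the table.
	 */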
1033 	for (seg = 0; seg < dma_maps->dmamap_xfer->dm_nsegs; seg++) {
1034 #ifdef DIAGNOSTIC
1035 		/* A segment must not cross a 64k boundary */
1036 		{
1037 		u_long phys = dma_maps->dmamap_xfer->dm_segs[seg].ds_addr;
1038 		u_long len = dma_maps->dmamap_xfer->dm_segs[seg].ds_len;
1039 		if ((phys & ~IDEDMA_BYTE_COUNT_MASK) !=
1040 		    ((phys + len - 1) & ~IDEDMA_BYTE_COUNT_MASK)) {
1041 			printf("pciide_dma: segment %d physical addr 0x%lx"
1042 			    " len 0x%lx not properly aligned\n",
1043 			    seg, phys, len);
1044 			panic("pciide_dma: buf align");
1045 		}
1046 		}
1047 #endif
1048 		dma_maps->dma_table[seg].base_addr =
1049 		    htole32(dma_maps->dmamap_xfer->dm_segs[seg].ds_addr);
1050 		dma_maps->dma_table[seg].byte_count =
1051 		    htole32(dma_maps->dmamap_xfer->dm_segs[seg].ds_len &
1052 		    IDEDMA_BYTE_COUNT_MASK);
1053 		WDCDEBUG_PRINT(("\t seg %d len %d addr 0x%x\n",
1054 		   seg, letoh32(dma_maps->dma_table[seg].byte_count),
1055 		   letoh32(dma_maps->dma_table[seg].base_addr)), DEBUG_DMA);
1056 
1057 	}
1058 	dma_maps->dma_table[dma_maps->dmamap_xfer->dm_nsegs -1].byte_count |=
1059 	    htole32(IDEDMA_BYTE_COUNT_EOT);
1060 
1061 #ifndef __OpenBSD__
1062 	bus_dmamap_sync(sc->sc_dmat, dma_maps->dmamap_table,
1063 	    0,
1064 	    dma_maps->dmamap_table->dm_mapsize,
1065 	    BUS_DMASYNC_PREWRITE);
1066 #else
1067 	bus_dmamap_sync(sc->sc_dmat, dma_maps->dmamap_table,
1068 	    BUS_DMASYNC_PREWRITE);
1069 #endif
1070 
1071 	/* Maps are ready. Start DMA function */
1072 #ifdef DIAGNOSTIC
1073 	if (dma_maps->dmamap_table->dm_segs[0].ds_addr & ~IDEDMA_TBL_MASK) {
1074 		printf("pciide_dma_init: addr 0x%lx not properly aligned\n",
1075 		    dma_maps->dmamap_table->dm_segs[0].ds_addr);
1076 		panic("pciide_dma_init: table align");
1077 	}
1078 #endif
1079 
1080 	/* Clear status bits */
1081 	bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
1082 	    IDEDMA_CTL + IDEDMA_SCH_OFFSET * channel,
1083 	    bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh,
1084 		IDEDMA_CTL + IDEDMA_SCH_OFFSET * channel));
1085 	/* Write table addr */
1086 	bus_space_write_4(sc->sc_dma_iot, sc->sc_dma_ioh,
1087 	    IDEDMA_TBL + IDEDMA_SCH_OFFSET * channel,
1088 	    dma_maps->dmamap_table->dm_segs[0].ds_addr);
1089 	/* set read/write */
1090 	bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
1091 	    IDEDMA_CMD + IDEDMA_SCH_OFFSET * channel,
1092 	    (flags & WDC_DMA_READ) ? IDEDMA_CMD_WRITE: 0);
1093 	/* remember flags */
1094 	dma_maps->dma_flags = flags;
1095 	return 0;
1096 }
1097 
1098 void
1099 pciide_dma_start(v, channel, drive)
1100 	void *v;
1101 	int channel, drive;
1102 {
1103 	struct pciide_softc *sc = v;
1104 
1105 	WDCDEBUG_PRINT(("pciide_dma_start\n"),DEBUG_XFERS);
1106 	bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
1107 	    IDEDMA_CMD + IDEDMA_SCH_OFFSET * channel,
1108 	    bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh,
1109 		IDEDMA_CMD + IDEDMA_SCH_OFFSET * channel) | IDEDMA_CMD_START);
1110 
1111 	sc->pciide_channels[channel].dma_in_progress = 1;
1112 }
1113 
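/*
 * Stop the bus-master engine, sync and unload the xfer DMA map, and
 * translate the status register bits into WDC_DMAST_* error flags for
 * the caller.
 */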
1114 int
1115 pciide_dma_finish(v, channel, drive)
1116 	void *v;
1117 	int channel, drive;
1118 {
1119 	struct pciide_softc *sc = v;
1120 	u_int8_t status;
1121 	int error = 0;
1122 	struct pciide_dma_maps *dma_maps =
1123 	    &sc->pciide_channels[channel].dma_maps[drive];
1124 
1125 	sc->pciide_channels[channel].dma_in_progress = 0;
1126 
1127 	status = bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh,
1128 	    IDEDMA_CTL + IDEDMA_SCH_OFFSET * channel);
1129 	WDCDEBUG_PRINT(("pciide_dma_finish: status 0x%x\n", status),
1130 	    DEBUG_XFERS);
1131 
1132 	/* stop DMA channel */
1133 	bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
1134 	    IDEDMA_CMD + IDEDMA_SCH_OFFSET * channel,
1135 	    (dma_maps->dma_flags & WDC_DMA_READ) ?
1136 	    0x00 : IDEDMA_CMD_WRITE);
1137 
1138 	/* Unload the map of the data buffer */
1139 #ifndef __OpenBSD__
1140 	bus_dmamap_sync(sc->sc_dmat, dma_maps->dmamap_xfer,
1141 	    0,
1142 	    dma_maps->dmamap_xfer->dm_mapsize,
1143 	    (dma_maps->dma_flags & WDC_DMA_READ) ?
1144 	    BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE);
1145 #else
1146 	bus_dmamap_sync(sc->sc_dmat, dma_maps->dmamap_xfer,
1147 	    (dma_maps->dma_flags & WDC_DMA_READ) ?
1148 	    BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE);
1149 #endif
1150 	bus_dmamap_unload(sc->sc_dmat, dma_maps->dmamap_xfer);
1151 
1152 	/* Clear status bits */
1153 	bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
1154 	    IDEDMA_CTL + IDEDMA_SCH_OFFSET * channel,
1155 	    status);
1156 
1157 	if ((status & IDEDMA_CTL_ERR) != 0) {
1158 		printf("%s:%d:%d: bus-master DMA error: status=0x%x\n",
1159 		    sc->sc_wdcdev.sc_dev.dv_xname, channel, drive, status);
1160 		error |= WDC_DMAST_ERR;
1161 	}
1162 
1163 	if ((status & IDEDMA_CTL_INTR) == 0) {
1164 		printf("%s:%d:%d: bus-master DMA error: missing interrupt, "
1165 		    "status=0x%x\n", sc->sc_wdcdev.sc_dev.dv_xname, channel,
1166 		    drive, status);
1167 		error |= WDC_DMAST_NOIRQ;
1168 	}
1169 
1170 	if ((status & IDEDMA_CTL_ACT) != 0) {
1171 		/* data underrun, may be a valid condition for ATAPI */
1172 		error |= WDC_DMAST_UNDER;
1173 	}
1174 	return error;
1175 }
1176 
1177 void
1178 pciide_irqack(chp)
1179 	struct channel_softc *chp;
1180 {
1181 	struct pciide_channel *cp = (struct pciide_channel*)chp;
1182 	struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
1183 
1184 	/* clear status bits in IDE DMA registers */
1185 	bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
1186 	    IDEDMA_CTL + IDEDMA_SCH_OFFSET * chp->channel,
1187 	    bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh,
1188 		IDEDMA_CTL + IDEDMA_SCH_OFFSET * chp->channel));
1189 }
1190 
1191 /* some common code used by several chip_map */
1192 int
1193 pciide_chansetup(sc, channel, interface)
1194 	struct pciide_softc *sc;
1195 	int channel;
1196 	pcireg_t interface;
1197 {
1198 	struct pciide_channel *cp = &sc->pciide_channels[channel];
1199 	sc->wdc_chanarray[channel] = &cp->wdc_channel;
1200 	cp->name = PCIIDE_CHANNEL_NAME(channel);
1201 	cp->wdc_channel.channel = channel;
1202 	cp->wdc_channel.wdc = &sc->sc_wdcdev;
1203 	cp->wdc_channel.ch_queue =
1204 	    malloc(sizeof(struct channel_queue), M_DEVBUF, M_NOWAIT);
1205 	if (cp->wdc_channel.ch_queue == NULL) {
1206 		printf("%s: %s "
1207 		    "cannot allocate memory for command queue\n",
1208 		    sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
1209 		return 0;
1210 	}
1211 	cp->hw_ok = 1;
1212 
1213 	return 1;
1214 }
1215 
1216 /* some common code used by several chip channel_map */
1217 void
1218 pciide_mapchan(pa, cp, interface, cmdsizep, ctlsizep, pci_intr)
1219 	struct pci_attach_args *pa;
1220 	struct pciide_channel *cp;
1221 	pcireg_t interface;
1222 	bus_size_t *cmdsizep, *ctlsizep;
1223 	int (*pci_intr) __P((void *));
1224 {
1225 	struct channel_softc *wdc_cp = &cp->wdc_channel;
1226 
1227 	if (interface & PCIIDE_INTERFACE_PCI(wdc_cp->channel))
1228 		cp->hw_ok = pciide_mapregs_native(pa, cp, cmdsizep, ctlsizep,
1229 		    pci_intr);
1230 	else
1231 		cp->hw_ok = pciide_mapregs_compat(pa, cp,
1232 		    wdc_cp->channel, cmdsizep, ctlsizep);
1233 	if (cp->hw_ok == 0)
1234 		return;
1235 	wdc_cp->data32iot = wdc_cp->cmd_iot;
1236 	wdc_cp->data32ioh = wdc_cp->cmd_ioh;
1237 	wdcattach(wdc_cp);
1238 }
1239 
1240 /*
1241  * Generic code to determine whether a channel can be disabled. Returns 1
1242  * if the channel can be disabled, 0 if not.
1243  */
1244 int
1245 pciide_chan_candisable(cp)
1246 	struct pciide_channel *cp;
1247 {
1248 	struct channel_softc *wdc_cp = &cp->wdc_channel;
1249 
1250 	if ((wdc_cp->ch_drive[0].drive_flags & DRIVE) == 0 &&
1251 	    (wdc_cp->ch_drive[1].drive_flags & DRIVE) == 0) {
1252 		cp->hw_ok = 0;
1253 		return 1;
1254 	}
1255 	return 0;
1256 }
1257 
1258 /*
1259  * generic code to map the compat intr if hw_ok=1 and it is a compat channel.
1260  * Set hw_ok=0 on failure
1261  */
1262 void
1263 pciide_map_compat_intr(pa, cp, compatchan, interface)
1264 	struct pci_attach_args *pa;
1265 	struct pciide_channel *cp;
1266 	int compatchan, interface;
1267 {
1268 	struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
1269 	struct channel_softc *wdc_cp = &cp->wdc_channel;
1270 
1271 	if ((interface & PCIIDE_INTERFACE_PCI(wdc_cp->channel)) != 0)
1272 		return;
1273 
1274 	cp->compat = 1;
1275 	cp->ih = pciide_machdep_compat_intr_establish(&sc->sc_wdcdev.sc_dev,
1276 	    pa, compatchan, pciide_compat_intr, cp);
1277 	if (cp->ih == NULL) {
1278 		printf("%s: no compatibility interrupt for use by %s\n",
1279 		    sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
1280 		cp->hw_ok = 0;
1281 	}
1282 }
1283 
1284 /*
1285  * generic code to unmap the compat intr if it was mapped for a compat
1286  * channel.
1287  */
1288 void
1289 pciide_unmap_compat_intr(pa, cp, compatchan, interface)
1290 	struct pci_attach_args *pa;
1291 	struct pciide_channel *cp;
1292 	int compatchan, interface;
1293 {
1294 	struct channel_softc *wdc_cp = &cp->wdc_channel;
1295 
1296 	if ((interface & PCIIDE_INTERFACE_PCI(wdc_cp->channel)) != 0)
1297 		return;
1298 
1299 	pciide_machdep_compat_intr_disestablish(pa->pa_pc, cp->ih);
1300 }
1301 
1302 void
1303 pciide_print_channels(nchannels, interface)
1304 	int nchannels;
1305 	pcireg_t interface;
1306 {
1307 	int i;
1308 
1309 	for (i = 0; i < nchannels; i++) {
1310 		printf(", %s %s to %s", PCIIDE_CHANNEL_NAME(i),
1311 		    (interface & PCIIDE_INTERFACE_SETTABLE(i)) ?
1312 		    "configured" : "wired",
1313 		    (interface & PCIIDE_INTERFACE_PCI(i)) ? "native-PCI" :
1314 		    "compatibility");
1315 	}
1316 
1317 	printf("\n");
1318 }
1319 
1320 void
1321 pciide_print_modes(cp)
1322 	struct pciide_channel *cp;
1323 {
1324 	wdc_print_current_modes(&cp->wdc_channel);
1325 }
1326 
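/*
 * Generic chip_map used when there is no chip-specific support: map each
 * channel in whatever mode the programming interface reports and probe
 * for drives.  For unknown controllers, bus-master DMA is only enabled
 * when explicitly allowed by the "flags" config option.
 */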
1327 void
1328 default_chip_map(sc, pa)
1329 	struct pciide_softc *sc;
1330 	struct pci_attach_args *pa;
1331 {
1332 	struct pciide_channel *cp;
1333 	pcireg_t interface = PCI_INTERFACE(pa->pa_class);
1334 	pcireg_t csr;
1335 	int channel, drive;
1336 	struct ata_drive_datas *drvp;
1337 	u_int8_t idedma_ctl;
1338 	bus_size_t cmdsize, ctlsize;
1339 	char *failreason;
1340 
1341 	if (pciide_chipen(sc, pa) == 0)
1342 		return;
1343 
1344 	if (interface & PCIIDE_INTERFACE_BUS_MASTER_DMA) {
1345 		printf(": DMA");
1346 		if (sc->sc_pp == &default_product_desc &&
1347 		    (sc->sc_wdcdev.sc_dev.dv_cfdata->cf_flags &
1348 		    PCIIDE_OPTIONS_DMA) == 0) {
1349 			printf(" (unsupported)");
1350 			sc->sc_dma_ok = 0;
1351 		} else {
1352 			pciide_mapreg_dma(sc, pa);
1353 			if (sc->sc_dma_ok != 0)
1354 				printf(", (partial support)");
1355 		}
1356 	} else {
1357 		printf(": no DMA");
1358 		sc->sc_dma_ok = 0;
1359 	}
1360 	if (sc->sc_dma_ok) {
1361 		sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_IRQACK;
1362 		sc->sc_wdcdev.irqack = pciide_irqack;
1363 	}
1364 	sc->sc_wdcdev.PIO_cap = 0;
1365 	sc->sc_wdcdev.DMA_cap = 0;
1366 	sc->sc_wdcdev.channels = sc->wdc_chanarray;
1367 	sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
1368 	sc->sc_wdcdev.cap |= WDC_CAPABILITY_DATA16;
1369 
1370 	pciide_print_channels(sc->sc_wdcdev.nchannels, interface);
1371 
1372 	for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
1373 		cp = &sc->pciide_channels[channel];
1374 		if (pciide_chansetup(sc, channel, interface) == 0)
1375 			continue;
1376 		if (interface & PCIIDE_INTERFACE_PCI(channel)) {
1377 			cp->hw_ok = pciide_mapregs_native(pa, cp, &cmdsize,
1378 			    &ctlsize, pciide_pci_intr);
1379 		} else {
1380 			cp->hw_ok = pciide_mapregs_compat(pa, cp,
1381 			    channel, &cmdsize, &ctlsize);
1382 		}
1383 		if (cp->hw_ok == 0)
1384 			continue;
1385 		/*
1386 		 * Check to see if something appears to be there.
1387 		 */
1388 		failreason = NULL;
1389 		pciide_map_compat_intr(pa, cp, channel, interface);
1390 		if (cp->hw_ok == 0)
1391 			continue;
1392 		if (!wdcprobe(&cp->wdc_channel)) {
1393 			failreason = "not responding; disabled or no drives?";
1394 			goto next;
1395 		}
1396 		/*
1397 		 * Now, make sure it's actually attributable to this PCI IDE
1398 		 * channel by trying to access the channel again while the
1399 		 * PCI IDE controller's I/O space is disabled.  (If the
1400 		 * channel no longer appears to be there, it belongs to
1401 		 * this controller.)  YUCK!
1402 		 */
1403 		csr = pci_conf_read(sc->sc_pc, sc->sc_tag,
1404 	  	    PCI_COMMAND_STATUS_REG);
1405 		pci_conf_write(sc->sc_pc, sc->sc_tag, PCI_COMMAND_STATUS_REG,
1406 		    csr & ~PCI_COMMAND_IO_ENABLE);
1407 		if (wdcprobe(&cp->wdc_channel))
1408 			failreason = "other hardware responding at addresses";
1409 		pci_conf_write(sc->sc_pc, sc->sc_tag,
1410 		    PCI_COMMAND_STATUS_REG, csr);
1411 next:
1412 		if (failreason) {
1413 			printf("%s: %s ignored (%s)\n",
1414 			    sc->sc_wdcdev.sc_dev.dv_xname, cp->name,
1415 			    failreason);
1416 			cp->hw_ok = 0;
1417 			pciide_unmap_compat_intr(pa, cp, channel, interface);
1418 			bus_space_unmap(cp->wdc_channel.cmd_iot,
1419 			    cp->wdc_channel.cmd_ioh, cmdsize);
1420 			bus_space_unmap(cp->wdc_channel.ctl_iot,
1421 			    cp->wdc_channel.ctl_ioh, ctlsize);
1422 		}
1423 		if (cp->hw_ok) {
1424 			cp->wdc_channel.data32iot = cp->wdc_channel.cmd_iot;
1425 			cp->wdc_channel.data32ioh = cp->wdc_channel.cmd_ioh;
1426 			wdcattach(&cp->wdc_channel);
1427 		}
1428 	}
1429 
1430 	if (sc->sc_dma_ok == 0)
1431 		return;
1432 
1433 	/* Allocate DMA maps */
1434 	for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
1435 		idedma_ctl = 0;
1436 		cp = &sc->pciide_channels[channel];
1437 		for (drive = 0; drive < 2; drive++) {
1438 			drvp = &cp->wdc_channel.ch_drive[drive];
1439 			/* If no drive, skip */
1440 			if ((drvp->drive_flags & DRIVE) == 0)
1441 				continue;
1442 			if ((drvp->drive_flags & DRIVE_DMA) == 0)
1443 				continue;
1444 			if (pciide_dma_table_setup(sc, channel, drive) != 0) {
1445 				/* Abort DMA setup */
1446 				printf("%s:%d:%d: cannot allocate DMA maps, "
1447 				    "using PIO transfers\n",
1448 				    sc->sc_wdcdev.sc_dev.dv_xname,
1449 				    channel, drive);
1450 				drvp->drive_flags &= ~DRIVE_DMA;
				continue;
1451 			}
1452 			printf("%s:%d:%d: using DMA data transfers\n",
1453 			    sc->sc_wdcdev.sc_dev.dv_xname,
1454 			    channel, drive);
1455 			idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
1456 		}
1457 		if (idedma_ctl != 0) {
1458 			/* Add software bits in status register */
1459 			bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
1460 			    IDEDMA_CTL + (IDEDMA_SCH_OFFSET * channel),
1461 			    idedma_ctl);
1462 		}
1463 	}
1464 }
1465 
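/*
 * Intel PIIX/PIIX3/PIIX4/ICH family: the channels are always in
 * compatibility mode, and the timing registers (IDETIM, SIDETIM, UDMAREG)
 * live in PCI configuration space.
 */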
1466 void
1467 piix_chip_map(sc, pa)
1468 	struct pciide_softc *sc;
1469 	struct pci_attach_args *pa;
1470 {
1471 	struct pciide_channel *cp;
1472 	int channel;
1473 	u_int32_t idetim;
1474 	bus_size_t cmdsize, ctlsize;
1475 
1476 	pcireg_t interface = PCI_INTERFACE(pa->pa_class);
1477 
1478 	if (pciide_chipen(sc, pa) == 0)
1479 		return;
1480 
1481 	printf(": DMA");
1482 	pciide_mapreg_dma(sc, pa);
1483 	sc->sc_wdcdev.cap |= WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
1484 	    WDC_CAPABILITY_MODE;
1485 	if (sc->sc_dma_ok) {
1486 		sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_IRQACK;
1487 		sc->sc_wdcdev.irqack = pciide_irqack;
1488 		switch (sc->sc_pp->ide_product) {
1489 		case PCI_PRODUCT_INTEL_82371AB_IDE:
1490 		case PCI_PRODUCT_INTEL_82440MX_IDE:
1491 		case PCI_PRODUCT_INTEL_82801AA_IDE:
1492 		case PCI_PRODUCT_INTEL_82801AB_IDE:
1493 		case PCI_PRODUCT_INTEL_82801BAM_IDE:
1494 		case PCI_PRODUCT_INTEL_82801BA_IDE:
1495 			sc->sc_wdcdev.cap |= WDC_CAPABILITY_UDMA;
1496 			break;
1497 		}
1498 	}
1499 	sc->sc_wdcdev.PIO_cap = 4;
1500 	sc->sc_wdcdev.DMA_cap = 2;
1501 	switch (sc->sc_pp->ide_product) {
1502 	case PCI_PRODUCT_INTEL_82801AA_IDE:
1503 		sc->sc_wdcdev.UDMA_cap = 4;
1504 		break;
1505 	case PCI_PRODUCT_INTEL_82801BAM_IDE:
1506 	case PCI_PRODUCT_INTEL_82801BA_IDE:
1507 		sc->sc_wdcdev.UDMA_cap = 5;
1508 		break;
1509 	default:
1510 		sc->sc_wdcdev.UDMA_cap = 2;
1511 		break;
1512 	}
1513 	if (sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82371FB_IDE)
1514 		sc->sc_wdcdev.set_modes = piix_setup_channel;
1515 	else
1516 		sc->sc_wdcdev.set_modes = piix3_4_setup_channel;
1517 	sc->sc_wdcdev.channels = sc->wdc_chanarray;
1518 	sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
1519 
1520 	pciide_print_channels(sc->sc_wdcdev.nchannels, interface);
1521 
1522 	WDCDEBUG_PRINT(("piix_setup_chip: old idetim=0x%x",
1523 	    pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_IDETIM)),
1524 	    DEBUG_PROBE);
1525 	if (sc->sc_pp->ide_product != PCI_PRODUCT_INTEL_82371FB_IDE) {
1526 		WDCDEBUG_PRINT((", sidetim=0x%x",
1527 		    pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_SIDETIM)),
1528 		    DEBUG_PROBE);
1529 		if (sc->sc_wdcdev.cap & WDC_CAPABILITY_UDMA) {
1530 			WDCDEBUG_PRINT((", udmareg 0x%x",
1531 			    pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_UDMAREG)),
1532 			    DEBUG_PROBE);
1533 		}
1534 		if (sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801AA_IDE ||
1535 		    sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801AB_IDE ||
1536 		    sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801BAM_IDE ||
1537 		    sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801BA_IDE ) {
1538 			WDCDEBUG_PRINT((", IDE_CONTROL 0x%x",
1539 			    pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_CONFIG)),
1540 			    DEBUG_PROBE);
1541 		}
1542 
1543 	}
1544 	WDCDEBUG_PRINT(("\n"), DEBUG_PROBE);
1545 
1546 	for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
1547 		cp = &sc->pciide_channels[channel];
1548 		/* PIIX is compat-only */
1549 		if (pciide_chansetup(sc, channel, 0) == 0)
1550 			continue;
1551 		idetim = pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_IDETIM);
1552 		if ((PIIX_IDETIM_READ(idetim, channel) &
1553 		    PIIX_IDETIM_IDE) == 0) {
1554 			printf("%s: %s ignored (disabled)\n",
1555 			    sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
1556 			continue;
1557 		}
1558 		/* PIIX chips are compat-only pciide devices */
1559 		pciide_map_compat_intr(pa, cp, channel, 0);
1560 		if (cp->hw_ok == 0)
1561 			continue;
1562 		pciide_mapchan(pa, cp, 0, &cmdsize, &ctlsize, pciide_pci_intr);
1563 		if (cp->hw_ok == 0)
1564 			goto next;
1565 		if (pciide_chan_candisable(cp)) {
1566 			idetim = PIIX_IDETIM_CLEAR(idetim, PIIX_IDETIM_IDE,
1567 			    channel);
1568 			pci_conf_write(sc->sc_pc, sc->sc_tag, PIIX_IDETIM,
1569 			    idetim);
1570 		}
1571 		if (cp->hw_ok == 0)
1572 			goto next;
1573 		sc->sc_wdcdev.set_modes(&cp->wdc_channel);
1574 next:
1575 		if (cp->hw_ok == 0)
1576 			pciide_unmap_compat_intr(pa, cp, channel, 0);
1577 	}
1578 
1579 	WDCDEBUG_PRINT(("piix_setup_chip: idetim=0x%x",
1580 	    pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_IDETIM)),
1581 	    DEBUG_PROBE);
1582 	if (sc->sc_pp->ide_product != PCI_PRODUCT_INTEL_82371FB_IDE) {
1583 		WDCDEBUG_PRINT((", sidetim=0x%x",
1584 		    pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_SIDETIM)),
1585 		    DEBUG_PROBE);
1586 		if (sc->sc_wdcdev.cap & WDC_CAPABILITY_UDMA) {
1587 			WDCDEBUG_PRINT((", udmareg 0x%x",
1588 			    pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_UDMAREG)),
1589 			    DEBUG_PROBE);
1590 		}
1591 		if (sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801AA_IDE ||
1592 		    sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801AB_IDE ||
1593 		    sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801BAM_IDE ||
1594 		    sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801BA_IDE ) {
1595 			WDCDEBUG_PRINT((", IDE_CONTROL 0x%x",
1596 			    pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_CONFIG)),
1597 			    DEBUG_PROBE);
1598 		}
1599 	}
1600 	WDCDEBUG_PRINT(("\n"), DEBUG_PROBE);
1601 }
1602 
1603 void
1604 piix_setup_channel(chp)
1605 	struct channel_softc *chp;
1606 {
1607 	u_int8_t mode[2], drive;
1608 	u_int32_t oidetim, idetim, idedma_ctl;
1609 	struct pciide_channel *cp = (struct pciide_channel*)chp;
1610 	struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
1611 	struct ata_drive_datas *drvp = cp->wdc_channel.ch_drive;
1612 
1613 	oidetim = pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_IDETIM);
1614 	idetim = PIIX_IDETIM_CLEAR(oidetim, 0xffff, chp->channel);
1615 	idedma_ctl = 0;
1616 
1617 	/* set up new idetim: Enable IDE registers decode */
1618 	idetim = PIIX_IDETIM_SET(idetim, PIIX_IDETIM_IDE,
1619 	    chp->channel);
1620 
1621 	/* setup DMA */
1622 	pciide_channel_dma_setup(cp);
1623 
1624 	/*
1625 	 * Here we have to mess with the drives' modes: the PIIX can't have
1626 	 * different timings for the master and slave drives.
1627 	 * We need to find the best combination.
1628 	 */
1629 
1630 	/* If both drives support DMA, take the lower mode */
1631 	if ((drvp[0].drive_flags & DRIVE_DMA) &&
1632 	    (drvp[1].drive_flags & DRIVE_DMA)) {
1633 		mode[0] = mode[1] =
1634 		    min(drvp[0].DMA_mode, drvp[1].DMA_mode);
1635 		drvp[0].DMA_mode = mode[0];
1636 		drvp[1].DMA_mode = mode[1];
1637 		goto ok;
1638 	}
1639 	/*
1640 	 * If only one drive supports DMA, use its mode, and
1641 	 * put the other one in PIO mode 0 if its mode is not compatible.
1642 	 */
1643 	if (drvp[0].drive_flags & DRIVE_DMA) {
1644 		mode[0] = drvp[0].DMA_mode;
1645 		mode[1] = drvp[1].PIO_mode;
1646 		if (piix_isp_pio[mode[1]] != piix_isp_dma[mode[0]] ||
1647 		    piix_rtc_pio[mode[1]] != piix_rtc_dma[mode[0]])
1648 			mode[1] = drvp[1].PIO_mode = 0;
1649 		goto ok;
1650 	}
1651 	if (drvp[1].drive_flags & DRIVE_DMA) {
1652 		mode[1] = drvp[1].DMA_mode;
1653 		mode[0] = drvp[0].PIO_mode;
1654 		if (piix_isp_pio[mode[0]] != piix_isp_dma[mode[1]] ||
1655 		    piix_rtc_pio[mode[0]] != piix_rtc_dma[mode[1]])
1656 			mode[0] = drvp[0].PIO_mode = 0;
1657 		goto ok;
1658 	}
1659 	/*
	 * If neither drive uses DMA, take the lower mode, unless
	 * one of them is below PIO mode 2.
1662 	 */
1663 	if (drvp[0].PIO_mode < 2) {
1664 		mode[0] = drvp[0].PIO_mode = 0;
1665 		mode[1] = drvp[1].PIO_mode;
1666 	} else if (drvp[1].PIO_mode < 2) {
1667 		mode[1] = drvp[1].PIO_mode = 0;
1668 		mode[0] = drvp[0].PIO_mode;
1669 	} else {
1670 		mode[0] = mode[1] =
1671 		    min(drvp[1].PIO_mode, drvp[0].PIO_mode);
1672 		drvp[0].PIO_mode = mode[0];
1673 		drvp[1].PIO_mode = mode[1];
1674 	}
1675 ok:	/* The modes are setup */
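	/* If either drive does DMA, its DMA mode sets the shared timings. */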
1676 	for (drive = 0; drive < 2; drive++) {
1677 		if (drvp[drive].drive_flags & DRIVE_DMA) {
1678 			idetim |= piix_setup_idetim_timings(
1679 			    mode[drive], 1, chp->channel);
1680 			goto end;
1681 		}
1682 	}
	/* If we get here, neither drive uses DMA */
1684 	if (mode[0] >= 2)
1685 		idetim |= piix_setup_idetim_timings(
1686 		    mode[0], 0, chp->channel);
1687 	else
1688 		idetim |= piix_setup_idetim_timings(
1689 		    mode[1], 0, chp->channel);
1690 end:	/*
1691 	 * timing mode is now set up in the controller. Enable
1692 	 * it per-drive
1693 	 */
1694 	for (drive = 0; drive < 2; drive++) {
1695 		/* If no drive, skip */
1696 		if ((drvp[drive].drive_flags & DRIVE) == 0)
1697 			continue;
1698 		idetim |= piix_setup_idetim_drvs(&drvp[drive]);
1699 		if (drvp[drive].drive_flags & DRIVE_DMA)
1700 			idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
1701 	}
1702 	if (idedma_ctl != 0) {
1703 		/* Add software bits in status register */
1704 		bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
1705 		    IDEDMA_CTL + (IDEDMA_SCH_OFFSET * chp->channel),
1706 		    idedma_ctl);
1707 	}
1708 	pci_conf_write(sc->sc_pc, sc->sc_tag, PIIX_IDETIM, idetim);
1709 	pciide_print_modes(cp);
1710 }
1711 
1712 void
1713 piix3_4_setup_channel(chp)
1714 	struct channel_softc *chp;
1715 {
1716 	struct ata_drive_datas *drvp;
1717 	u_int32_t oidetim, idetim, sidetim, udmareg, ideconf, idedma_ctl;
1718 	struct pciide_channel *cp = (struct pciide_channel*)chp;
1719 	struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
1720 	int drive;
1721 	int channel = chp->channel;
1722 
1723 	oidetim = pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_IDETIM);
1724 	sidetim = pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_SIDETIM);
1725 	udmareg = pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_UDMAREG);
1726 	ideconf = pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_CONFIG);
1727 	idetim = PIIX_IDETIM_CLEAR(oidetim, 0xffff, channel);
1728 	sidetim &= ~(PIIX_SIDETIM_ISP_MASK(channel) |
1729 	    PIIX_SIDETIM_RTC_MASK(channel));
1730 
1731 	idedma_ctl = 0;
1732 	/* If channel disabled, no need to go further */
1733 	if ((PIIX_IDETIM_READ(oidetim, channel) & PIIX_IDETIM_IDE) == 0)
1734 		return;
1735 	/* set up new idetim: Enable IDE registers decode */
1736 	idetim = PIIX_IDETIM_SET(idetim, PIIX_IDETIM_IDE, channel);
1737 
1738 	/* setup DMA if needed */
1739 	pciide_channel_dma_setup(cp);
1740 
1741 	for (drive = 0; drive < 2; drive++) {
1742 		udmareg &= ~(PIIX_UDMACTL_DRV_EN(channel, drive) |
1743 		    PIIX_UDMATIM_SET(0x3, channel, drive));
1744 		drvp = &chp->ch_drive[drive];
1745 		/* If no drive, skip */
1746 		if ((drvp->drive_flags & DRIVE) == 0)
1747 			continue;
1748 		if (((drvp->drive_flags & DRIVE_DMA) == 0 &&
1749 		    (drvp->drive_flags & DRIVE_UDMA) == 0))
1750 			goto pio;
1751 
1752 		if (sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801AA_IDE ||
1753 		    sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801AB_IDE ||
1754 		    sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801BAM_IDE ||
1755 		    sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801BA_IDE ) {
1756 		    ideconf |= PIIX_CONFIG_PINGPONG;
1757 		}
1758 		if (sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801BAM_IDE ||
1759 		    sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801BA_IDE) {
1760 			/* setup Ultra/100 */
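			/* if no 80-wire cable is reported, cap at Ultra/33 */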
1761 			if (drvp->UDMA_mode > 2 &&
1762 			    (ideconf & PIIX_CONFIG_CR(channel, drive)) == 0)
1763 				drvp->UDMA_mode = 2;
1764 			if (drvp->UDMA_mode > 4) {
1765 				ideconf |= PIIX_CONFIG_UDMA100(channel, drive);
1766 			} else {
1767 				ideconf &= ~PIIX_CONFIG_UDMA100(channel, drive);
1768 				if (drvp->UDMA_mode > 2) {
1769 					ideconf |= PIIX_CONFIG_UDMA66(channel,
1770 					    drive);
1771 				} else {
1772 					ideconf &= ~PIIX_CONFIG_UDMA66(channel,
1773 					    drive);
1774 				}
1775 			}
1776 		}
1777 		if (sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801AA_IDE) {
1778 			/* setup Ultra/66 */
1779 			if (drvp->UDMA_mode > 2 &&
1780 			    (ideconf & PIIX_CONFIG_CR(channel, drive)) == 0)
1781 				drvp->UDMA_mode = 2;
1782 			if (drvp->UDMA_mode > 2)
1783 				ideconf |= PIIX_CONFIG_UDMA66(channel, drive);
1784 			else
1785 				ideconf &= ~PIIX_CONFIG_UDMA66(channel, drive);
1786 		}
1787 
1788 		if ((chp->wdc->cap & WDC_CAPABILITY_UDMA) &&
1789 		    (drvp->drive_flags & DRIVE_UDMA)) {
1790 			/* use Ultra/DMA */
1791 			drvp->drive_flags &= ~DRIVE_DMA;
			udmareg |= PIIX_UDMACTL_DRV_EN(channel, drive);
1793 			udmareg |= PIIX_UDMATIM_SET(
1794 			    piix4_sct_udma[drvp->UDMA_mode], channel, drive);
1795 		} else {
1796 			/* use Multiword DMA */
1797 			drvp->drive_flags &= ~DRIVE_UDMA;
1798 			if (drive == 0) {
1799 				idetim |= piix_setup_idetim_timings(
1800 				    drvp->DMA_mode, 1, channel);
1801 			} else {
1802 				sidetim |= piix_setup_sidetim_timings(
1803 					drvp->DMA_mode, 1, channel);
				idetim = PIIX_IDETIM_SET(idetim,
1805 				    PIIX_IDETIM_SITRE, channel);
1806 			}
1807 		}
1808 		idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
1809 
1810 pio:		/* use PIO mode */
1811 		idetim |= piix_setup_idetim_drvs(drvp);
1812 		if (drive == 0) {
1813 			idetim |= piix_setup_idetim_timings(
1814 			    drvp->PIO_mode, 0, channel);
1815 		} else {
1816 			sidetim |= piix_setup_sidetim_timings(
1817 				drvp->PIO_mode, 0, channel);
			idetim = PIIX_IDETIM_SET(idetim,
1819 			    PIIX_IDETIM_SITRE, channel);
1820 		}
1821 	}
1822 	if (idedma_ctl != 0) {
1823 		/* Add software bits in status register */
1824 		bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
1825 		    IDEDMA_CTL + (IDEDMA_SCH_OFFSET * channel),
1826 		    idedma_ctl);
1827 	}
1828 	pci_conf_write(sc->sc_pc, sc->sc_tag, PIIX_IDETIM, idetim);
1829 	pci_conf_write(sc->sc_pc, sc->sc_tag, PIIX_SIDETIM, sidetim);
1830 	pci_conf_write(sc->sc_pc, sc->sc_tag, PIIX_UDMAREG, udmareg);
1831 	pci_conf_write(sc->sc_pc, sc->sc_tag, PIIX_CONFIG, ideconf);
1832 	pciide_print_modes(cp);
1833 }
1834 
1835 
1836 /* setup ISP and RTC fields, based on mode */
1837 static u_int32_t
1838 piix_setup_idetim_timings(mode, dma, channel)
1839 	u_int8_t mode;
1840 	u_int8_t dma;
1841 	u_int8_t channel;
1842 {
1843 
1844 	if (dma)
1845 		return PIIX_IDETIM_SET(0,
1846 		    PIIX_IDETIM_ISP_SET(piix_isp_dma[mode]) |
1847 		    PIIX_IDETIM_RTC_SET(piix_rtc_dma[mode]),
1848 		    channel);
1849 	else
1850 		return PIIX_IDETIM_SET(0,
1851 		    PIIX_IDETIM_ISP_SET(piix_isp_pio[mode]) |
1852 		    PIIX_IDETIM_RTC_SET(piix_rtc_pio[mode]),
1853 		    channel);
1854 }
1855 
1856 /* setup DTE, PPE, IE and TIME field based on PIO mode */
1857 static u_int32_t
1858 piix_setup_idetim_drvs(drvp)
1859 	struct ata_drive_datas *drvp;
1860 {
1861 	u_int32_t ret = 0;
1862 	struct channel_softc *chp = drvp->chnl_softc;
1863 	u_int8_t channel = chp->channel;
1864 	u_int8_t drive = drvp->drive;
1865 
1866 	/*
	 * If the drive is using UDMA, the timing setups are independent,
	 * so just check DMA and PIO here.
1869 	 */
1870 	if (drvp->drive_flags & DRIVE_DMA) {
1871 		/* if mode = DMA mode 0, use compatible timings */
1872 		if ((drvp->drive_flags & DRIVE_DMA) &&
1873 		    drvp->DMA_mode == 0) {
1874 			drvp->PIO_mode = 0;
1875 			return ret;
1876 		}
1877 		ret = PIIX_IDETIM_SET(ret, PIIX_IDETIM_TIME(drive), channel);
1878 		/*
		 * If the PIO and DMA timings are the same, use fast timings
		 * for PIO too, else use compat timings.
1881 		 */
1882 		if ((piix_isp_pio[drvp->PIO_mode] !=
1883 		    piix_isp_dma[drvp->DMA_mode]) ||
1884 		    (piix_rtc_pio[drvp->PIO_mode] !=
1885 		    piix_rtc_dma[drvp->DMA_mode]))
1886 			drvp->PIO_mode = 0;
1887 		/* if PIO mode <= 2, use compat timings for PIO */
1888 		if (drvp->PIO_mode <= 2) {
1889 			ret = PIIX_IDETIM_SET(ret, PIIX_IDETIM_DTE(drive),
1890 			    channel);
1891 			return ret;
1892 		}
1893 	}
1894 
1895 	/*
1896 	 * Now setup PIO modes. If mode < 2, use compat timings.
1897 	 * Else enable fast timings. Enable IORDY and prefetch/post
1898 	 * if PIO mode >= 3.
1899 	 */
1900 
1901 	if (drvp->PIO_mode < 2)
1902 		return ret;
1903 
1904 	ret = PIIX_IDETIM_SET(ret, PIIX_IDETIM_TIME(drive), channel);
1905 	if (drvp->PIO_mode >= 3) {
1906 		ret = PIIX_IDETIM_SET(ret, PIIX_IDETIM_IE(drive), channel);
1907 		ret = PIIX_IDETIM_SET(ret, PIIX_IDETIM_PPE(drive), channel);
1908 	}
1909 	return ret;
1910 }
1911 
1912 /* setup values in SIDETIM registers, based on mode */
1913 static u_int32_t
1914 piix_setup_sidetim_timings(mode, dma, channel)
1915 	u_int8_t mode;
1916 	u_int8_t dma;
1917 	u_int8_t channel;
1918 {
1919 	if (dma)
1920 		return PIIX_SIDETIM_ISP_SET(piix_isp_dma[mode], channel) |
1921 		    PIIX_SIDETIM_RTC_SET(piix_rtc_dma[mode], channel);
1922 	else
1923 		return PIIX_SIDETIM_ISP_SET(piix_isp_pio[mode], channel) |
1924 		    PIIX_SIDETIM_RTC_SET(piix_rtc_pio[mode], channel);
1925 }
1926 
1927 void
1928 amd756_chip_map(sc, pa)
1929 	struct pciide_softc *sc;
1930 	struct pci_attach_args *pa;
1931 {
1932 	struct pciide_channel *cp;
1933 	pcireg_t interface = PCI_INTERFACE(pa->pa_class);
1934 	int channel;
1935 	pcireg_t chanenable;
1936 	bus_size_t cmdsize, ctlsize;
1937 
1938 	if (pciide_chipen(sc, pa) == 0)
1939 		return;
1940 
1941 	printf(": DMA");
1942 	pciide_mapreg_dma(sc, pa);
1943 	sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
1944 	    WDC_CAPABILITY_MODE;
1945 	if (sc->sc_dma_ok) {
1946 		sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_UDMA;
1947                 sc->sc_wdcdev.cap |= WDC_CAPABILITY_IRQACK;
1948                 sc->sc_wdcdev.irqack = pciide_irqack;
1949 	}
1950 	sc->sc_wdcdev.PIO_cap = 4;
1951 	sc->sc_wdcdev.DMA_cap = 2;
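	/* the 766 does UDMA/100; older chips top out at UDMA/66 */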
1952 	switch (sc->sc_pp->ide_product) {
1953 	case PCI_PRODUCT_AMD_766_IDE:
1954 		sc->sc_wdcdev.UDMA_cap = 5;
1955 		break;
1956 	default:
1957 		sc->sc_wdcdev.UDMA_cap = 4;
1958 		break;
1959 	}
1960 	sc->sc_wdcdev.set_modes = amd756_setup_channel;
1961 	sc->sc_wdcdev.channels = sc->wdc_chanarray;
1962 	sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
1963 	chanenable = pci_conf_read(sc->sc_pc, sc->sc_tag, AMD756_CHANSTATUS_EN);
1964 
1965 	pciide_print_channels(sc->sc_wdcdev.nchannels, interface);
1966 
1967 	for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
1968 		cp = &sc->pciide_channels[channel];
1969 		if (pciide_chansetup(sc, channel, interface) == 0)
1970 			continue;
1971 
1972 		if ((chanenable & AMD756_CHAN_EN(channel)) == 0) {
1973 			printf("%s: %s ignored (disabled)\n",
1974 			    sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
1975 			continue;
1976 		}
1977 		pciide_map_compat_intr(pa, cp, channel, interface);
1978 		if (cp->hw_ok == 0)
1979 			continue;
1980 
1981 		pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize,
1982 		    pciide_pci_intr);
1983 
1984 		if (pciide_chan_candisable(cp)) {
1985 			chanenable &= ~AMD756_CHAN_EN(channel);
1986 		}
1987 		if (cp->hw_ok == 0) {
1988 			pciide_unmap_compat_intr(pa, cp, channel, interface);
1989 			continue;
1990 		}
1991 
1992 		amd756_setup_channel(&cp->wdc_channel);
1993 	}
1994 	pci_conf_write(sc->sc_pc, sc->sc_tag, AMD756_CHANSTATUS_EN,
1995 	    chanenable);
1996 	return;
1997 }
1998 
1999 void
2000 amd756_setup_channel(chp)
2001 	struct channel_softc *chp;
2002 {
2003 	u_int32_t udmatim_reg, datatim_reg;
2004 	u_int8_t idedma_ctl;
2005 	int mode, drive;
2006 	struct ata_drive_datas *drvp;
2007 	struct pciide_channel *cp = (struct pciide_channel*)chp;
2008 	struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
2009 #ifndef	PCIIDE_AMD756_ENABLEDMA
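	/* only needed for the multi-word DMA erratum check below */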
2010 	int product = PCI_PRODUCT(
2011 	    pci_conf_read(sc->sc_pc, sc->sc_tag, PCI_ID_REG));
2012 	int rev = PCI_REVISION(
2013 	    pci_conf_read(sc->sc_pc, sc->sc_tag, PCI_CLASS_REG));
2014 #endif
2015 
2016 	idedma_ctl = 0;
2017 	datatim_reg = pci_conf_read(sc->sc_pc, sc->sc_tag, AMD756_DATATIM);
2018 	udmatim_reg = pci_conf_read(sc->sc_pc, sc->sc_tag, AMD756_UDMA);
2019 	datatim_reg &= ~AMD756_DATATIM_MASK(chp->channel);
2020 	udmatim_reg &= ~AMD756_UDMA_MASK(chp->channel);
2021 
2022 	/* setup DMA if needed */
2023 	pciide_channel_dma_setup(cp);
2024 
2025 	for (drive = 0; drive < 2; drive++) {
2026 		drvp = &chp->ch_drive[drive];
2027 		/* If no drive, skip */
2028 		if ((drvp->drive_flags & DRIVE) == 0)
2029 			continue;
2030 		/* add timing values, setup DMA if needed */
2031 		if (((drvp->drive_flags & DRIVE_DMA) == 0 &&
2032 		    (drvp->drive_flags & DRIVE_UDMA) == 0)) {
2033 			mode = drvp->PIO_mode;
2034 			goto pio;
2035 		}
2036 		if ((chp->wdc->cap & WDC_CAPABILITY_UDMA) &&
2037 		    (drvp->drive_flags & DRIVE_UDMA)) {
2038 			/* use Ultra/DMA */
2039 			drvp->drive_flags &= ~DRIVE_DMA;
2040 			udmatim_reg |= AMD756_UDMA_EN(chp->channel, drive) |
2041 			    AMD756_UDMA_EN_MTH(chp->channel, drive) |
2042 			    AMD756_UDMA_TIME(chp->channel, drive,
2043 				amd756_udma_tim[drvp->UDMA_mode]);
2044 			/* can use PIO timings, MW DMA unused */
2045 			mode = drvp->PIO_mode;
2046 		} else {
2047 			/* use Multiword DMA, but only if revision is OK */
2048 			drvp->drive_flags &= ~DRIVE_UDMA;
2049 #ifndef PCIIDE_AMD756_ENABLEDMA
2050 			/*
			 * The workaround doesn't seem to be necessary
			 * with all drives, so it can be disabled with
			 * PCIIDE_AMD756_ENABLEDMA. The erratum causes a
			 * hard hang if triggered.
2055 			 */
2056 			if (AMD756_CHIPREV_DISABLEDMA(product, rev)) {
2057 				printf("%s:%d:%d: multi-word DMA disabled due "
2058 				    "to chip revision\n",
2059 				    sc->sc_wdcdev.sc_dev.dv_xname,
2060 				    chp->channel, drive);
2061 				mode = drvp->PIO_mode;
2062 				drvp->drive_flags &= ~DRIVE_DMA;
2063 				goto pio;
2064 			}
2065 #endif
2066 			/* mode = min(pio, dma+2) */
			if (drvp->PIO_mode <= (drvp->DMA_mode + 2))
2068 				mode = drvp->PIO_mode;
2069 			else
2070 				mode = drvp->DMA_mode + 2;
2071 		}
2072 		idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
2073 
2074 pio:		/* setup PIO mode */
2075 		if (mode <= 2) {
2076 			drvp->DMA_mode = 0;
2077 			drvp->PIO_mode = 0;
2078 			mode = 0;
2079 		} else {
2080 			drvp->PIO_mode = mode;
2081 			drvp->DMA_mode = mode - 2;
2082 		}
2083 		datatim_reg |=
2084 		    AMD756_DATATIM_PULSE(chp->channel, drive,
2085 			amd756_pio_set[mode]) |
2086 		    AMD756_DATATIM_RECOV(chp->channel, drive,
2087 			amd756_pio_rec[mode]);
2088 	}
2089 	if (idedma_ctl != 0) {
2090 		/* Add software bits in status register */
2091 		bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
2092 		    IDEDMA_CTL + (IDEDMA_SCH_OFFSET * chp->channel),
2093 		    idedma_ctl);
2094 	}
2095 	pciide_print_modes(cp);
2096 	pci_conf_write(sc->sc_pc, sc->sc_tag, AMD756_DATATIM, datatim_reg);
2097 	pci_conf_write(sc->sc_pc, sc->sc_tag, AMD756_UDMA, udmatim_reg);
2098 }
2099 
2100 void
2101 apollo_chip_map(sc, pa)
2102 	struct pciide_softc *sc;
2103 	struct pci_attach_args *pa;
2104 {
2105 	struct pciide_channel *cp;
2106 	pcireg_t interface = PCI_INTERFACE(pa->pa_class);
2107 	int channel;
2108 	u_int32_t ideconf;
2109 	bus_size_t cmdsize, ctlsize;
2110 	pcitag_t pcib_tag;
2111 	pcireg_t pcib_id, pcib_class;
2112 
2113 	if (pciide_chipen(sc, pa) == 0)
2114 		return;
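	/*
	 * The capabilities depend on the exact VIA chip; look at the
	 * ISA bridge (function 0) for its product ID and revision.
	 */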
2115 	pcib_tag = pci_make_tag(pa->pa_pc, pa->pa_bus, pa->pa_device, 0);
2116 
2117 	pcib_id = pci_conf_read(sc->sc_pc, pcib_tag, PCI_ID_REG);
2118 	pcib_class = pci_conf_read(sc->sc_pc, pcib_tag, PCI_CLASS_REG);
2119 
2120 	switch (PCI_PRODUCT(pcib_id)) {
2121 	case PCI_PRODUCT_VIATECH_VT82C586_ISA:
2122 		if (PCI_REVISION(pcib_class) >= 0x02) {
2123 			printf(": ATA33");
2124 			sc->sc_wdcdev.UDMA_cap = 2;
2125 		} else {
2126 			printf(": DMA");
2127 			sc->sc_wdcdev.UDMA_cap = 0;
2128 		}
2129 		break;
2130 	case PCI_PRODUCT_VIATECH_VT82C596A:
2131 		if (PCI_REVISION(pcib_class) >= 0x12) {
2132 			printf(": ATA66");
2133 			sc->sc_wdcdev.UDMA_cap = 4;
2134 		} else {
2135 			printf(": ATA33");
2136 			sc->sc_wdcdev.UDMA_cap = 2;
2137 		}
2138 		break;
2139 
2140 	case PCI_PRODUCT_VIATECH_VT82C686A_ISA:
2141 		if (PCI_REVISION(pcib_class) >= 0x40) {
2142 			printf(": ATA100");
2143 			sc->sc_wdcdev.UDMA_cap = 5;
2144 		} else {
2145 			printf(": ATA66");
2146 			sc->sc_wdcdev.UDMA_cap = 4;
2147 		}
2148 		break;
2149 	default:
2150 		printf(": DMA");
2151 		sc->sc_wdcdev.UDMA_cap = 0;
2152 		break;
2153 	}
2154 
2155 	pciide_mapreg_dma(sc, pa);
2156 	sc->sc_wdcdev.cap |= WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
2157 	    WDC_CAPABILITY_MODE;
2158 	if (sc->sc_dma_ok) {
2159 		sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_IRQACK;
2160 		sc->sc_wdcdev.irqack = pciide_irqack;
2161 		if (sc->sc_wdcdev.UDMA_cap > 0)
2162 			sc->sc_wdcdev.cap |= WDC_CAPABILITY_UDMA;
2163 	}
2164 	sc->sc_wdcdev.PIO_cap = 4;
2165 	sc->sc_wdcdev.DMA_cap = 2;
2166 	sc->sc_wdcdev.set_modes = apollo_setup_channel;
2167 	sc->sc_wdcdev.channels = sc->wdc_chanarray;
2168 	sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
2169 
2170 	pciide_print_channels(sc->sc_wdcdev.nchannels, interface);
2171 
2172 	WDCDEBUG_PRINT(("apollo_chip_map: old APO_IDECONF=0x%x, "
2173 	    "APO_CTLMISC=0x%x, APO_DATATIM=0x%x, APO_UDMA=0x%x\n",
2174 	    pci_conf_read(sc->sc_pc, sc->sc_tag, APO_IDECONF),
2175 	    pci_conf_read(sc->sc_pc, sc->sc_tag, APO_CTLMISC),
2176 	    pci_conf_read(sc->sc_pc, sc->sc_tag, APO_DATATIM),
2177 	    pci_conf_read(sc->sc_pc, sc->sc_tag, APO_UDMA)),
2178 	    DEBUG_PROBE);
2179 
2180 	for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
2181 		cp = &sc->pciide_channels[channel];
2182 		if (pciide_chansetup(sc, channel, interface) == 0)
2183 			continue;
2184 
2185 		ideconf = pci_conf_read(sc->sc_pc, sc->sc_tag, APO_IDECONF);
2186 		if ((ideconf & APO_IDECONF_EN(channel)) == 0) {
2187 			printf("%s: %s ignored (disabled)\n",
2188 			    sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
2189 			continue;
2190 		}
2191 		pciide_map_compat_intr(pa, cp, channel, interface);
2192 		if (cp->hw_ok == 0)
2193 			continue;
2194 
2195 		pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize,
2196 		    pciide_pci_intr);
2197 		if (cp->hw_ok == 0) {
2198 			goto next;
2199 		}
2200 		if (pciide_chan_candisable(cp)) {
2201 			ideconf &= ~APO_IDECONF_EN(channel);
2202 			pci_conf_write(sc->sc_pc, sc->sc_tag, APO_IDECONF,
2203 				    ideconf);
2204 		}
2205 
2206 		if (cp->hw_ok == 0)
2207 			goto next;
2208 		apollo_setup_channel(&sc->pciide_channels[channel].wdc_channel);
2209 next:
2210 		if (cp->hw_ok == 0)
2211 			pciide_unmap_compat_intr(pa, cp, channel, interface);
2212 	}
2213 	WDCDEBUG_PRINT(("apollo_chip_map: APO_DATATIM=0x%x, APO_UDMA=0x%x\n",
2214 	    pci_conf_read(sc->sc_pc, sc->sc_tag, APO_DATATIM),
2215 	    pci_conf_read(sc->sc_pc, sc->sc_tag, APO_UDMA)), DEBUG_PROBE);
2216 }
2217 
2218 void
2219 apollo_setup_channel(chp)
2220 	struct channel_softc *chp;
2221 {
2222 	u_int32_t udmatim_reg, datatim_reg;
2223 	u_int8_t idedma_ctl;
2224 	int mode, drive;
2225 	struct ata_drive_datas *drvp;
2226 	struct pciide_channel *cp = (struct pciide_channel*)chp;
2227 	struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
2228 
2229 	idedma_ctl = 0;
2230 	datatim_reg = pci_conf_read(sc->sc_pc, sc->sc_tag, APO_DATATIM);
2231 	udmatim_reg = pci_conf_read(sc->sc_pc, sc->sc_tag, APO_UDMA);
2232 	datatim_reg &= ~APO_DATATIM_MASK(chp->channel);
2233 	udmatim_reg &= ~APO_UDMA_MASK(chp->channel);
2234 
2235 	/* setup DMA if needed */
2236 	pciide_channel_dma_setup(cp);
2237 
2238 	/*
2239 	 * We can't mix Ultra/33 and Ultra/66 on the same channel, so
2240 	 * downgrade to Ultra/33 if needed
2241 	 */
2242 	if ((chp->ch_drive[0].drive_flags & DRIVE_UDMA) &&
2243 	    (chp->ch_drive[1].drive_flags & DRIVE_UDMA)) {
2244 		/* both drives UDMA */
2245 		if (chp->ch_drive[0].UDMA_mode > 2 &&
2246 		    chp->ch_drive[1].UDMA_mode <= 2) {
2247 			/* drive 0 Ultra/66, drive 1 Ultra/33 */
2248 			chp->ch_drive[0].UDMA_mode = 2;
2249 		} else if (chp->ch_drive[1].UDMA_mode > 2 &&
2250 		    chp->ch_drive[0].UDMA_mode <= 2) {
2251 			/* drive 1 Ultra/66, drive 0 Ultra/33 */
2252 			chp->ch_drive[1].UDMA_mode = 2;
2253 		}
2254 	}
2255 
2256 	for (drive = 0; drive < 2; drive++) {
2257 		drvp = &chp->ch_drive[drive];
2258 		/* If no drive, skip */
2259 		if ((drvp->drive_flags & DRIVE) == 0)
2260 			continue;
2261 		/* add timing values, setup DMA if needed */
2262 		if (((drvp->drive_flags & DRIVE_DMA) == 0 &&
2263 		    (drvp->drive_flags & DRIVE_UDMA) == 0)) {
2264 			mode = drvp->PIO_mode;
2265 			goto pio;
2266 		}
2267 		if ((chp->wdc->cap & WDC_CAPABILITY_UDMA) &&
2268 		    (drvp->drive_flags & DRIVE_UDMA)) {
2269 			/* use Ultra/DMA */
2270 			drvp->drive_flags &= ~DRIVE_DMA;
2271 			udmatim_reg |= APO_UDMA_EN(chp->channel, drive) |
2272 			    APO_UDMA_EN_MTH(chp->channel, drive);
2273 
2274 			if (sc->sc_wdcdev.UDMA_cap == 5) {
2275 				/* 686b */
2276 				udmatim_reg |= APO_UDMA_CLK66(chp->channel);
2277 				udmatim_reg |= APO_UDMA_TIME(chp->channel,
2278 				    drive, apollo_udma100_tim[drvp->UDMA_mode]);
2279 			} else if (sc->sc_wdcdev.UDMA_cap == 4) {
2280 				/* 596b or 686a */
2281 				udmatim_reg |= APO_UDMA_CLK66(chp->channel);
2282 				udmatim_reg |= APO_UDMA_TIME(chp->channel,
2283 				    drive, apollo_udma66_tim[drvp->UDMA_mode]);
2284 			} else {
2285 				/* 596a or 586b */
2286 				udmatim_reg |= APO_UDMA_TIME(chp->channel,
2287 				    drive, apollo_udma33_tim[drvp->UDMA_mode]);
2288 			}
2289 			/* can use PIO timings, MW DMA unused */
2290 			mode = drvp->PIO_mode;
2291 		} else {
2292 			/* use Multiword DMA */
2293 			drvp->drive_flags &= ~DRIVE_UDMA;
2294 			/* mode = min(pio, dma+2) */
			if (drvp->PIO_mode <= (drvp->DMA_mode + 2))
2296 				mode = drvp->PIO_mode;
2297 			else
2298 				mode = drvp->DMA_mode + 2;
2299 		}
2300 		idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
2301 
2302 pio:		/* setup PIO mode */
2303 		if (mode <= 2) {
2304 			drvp->DMA_mode = 0;
2305 			drvp->PIO_mode = 0;
2306 			mode = 0;
2307 		} else {
2308 			drvp->PIO_mode = mode;
2309 			drvp->DMA_mode = mode - 2;
2310 		}
2311 		datatim_reg |=
2312 		    APO_DATATIM_PULSE(chp->channel, drive,
2313 			apollo_pio_set[mode]) |
2314 		    APO_DATATIM_RECOV(chp->channel, drive,
2315 			apollo_pio_rec[mode]);
2316 	}
2317 	if (idedma_ctl != 0) {
2318 		/* Add software bits in status register */
2319 		bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
2320 		    IDEDMA_CTL + (IDEDMA_SCH_OFFSET * chp->channel),
2321 		    idedma_ctl);
2322 	}
2323 	pciide_print_modes(cp);
2324 	pci_conf_write(sc->sc_pc, sc->sc_tag, APO_DATATIM, datatim_reg);
2325 	pci_conf_write(sc->sc_pc, sc->sc_tag, APO_UDMA, udmatim_reg);
2326 }
2327 
2328 void
2329 cmd_channel_map(pa, sc, channel)
2330 	struct pci_attach_args *pa;
2331 	struct pciide_softc *sc;
2332 	int channel;
2333 {
2334 	struct pciide_channel *cp = &sc->pciide_channels[channel];
2335 	bus_size_t cmdsize, ctlsize;
2336 	u_int8_t ctrl = pciide_pci_read(sc->sc_pc, sc->sc_tag, CMD_CTRL);
2337 	pcireg_t interface;
2338 
2339 	/*
2340 	 * The 0648/0649 can be told to identify as a RAID controller.
	 * In this case, we have to fake the interface.
2342 	 */
2343 	if (PCI_SUBCLASS(pa->pa_class) != PCI_SUBCLASS_MASS_STORAGE_IDE) {
2344 		interface = PCIIDE_INTERFACE_SETTABLE(0) |
2345 		    PCIIDE_INTERFACE_SETTABLE(1);
2346 		if (pciide_pci_read(pa->pa_pc, pa->pa_tag, CMD_CONF) &
2347 		    CMD_CONF_DSA1)
2348 			interface |= PCIIDE_INTERFACE_PCI(0) |
2349 			    PCIIDE_INTERFACE_PCI(1);
2350 	} else {
2351 		interface = PCI_INTERFACE(pa->pa_class);
2352 	}
2353 
2354 	sc->wdc_chanarray[channel] = &cp->wdc_channel;
2355 	cp->name = PCIIDE_CHANNEL_NAME(channel);
2356 	cp->wdc_channel.channel = channel;
2357 	cp->wdc_channel.wdc = &sc->sc_wdcdev;
2358 
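	/* both channels share one queue so their commands are serialized */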
2359 	if (channel > 0) {
2360 		cp->wdc_channel.ch_queue =
2361 		    sc->pciide_channels[0].wdc_channel.ch_queue;
2362 	} else {
2363 		cp->wdc_channel.ch_queue =
2364 		    malloc(sizeof(struct channel_queue), M_DEVBUF, M_NOWAIT);
2365 	}
2366 	if (cp->wdc_channel.ch_queue == NULL) {
		printf(
		    "%s: %s cannot allocate memory for command queue\n",
		    sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
2370 		return;
2371 	}
2372 
2373 	/*
	 * With a CMD PCI64x, if we get here, the first channel is enabled:
	 * there's no way to disable the first channel without disabling
	 * the whole device.
2377 	 */
	if (channel != 0 && (ctrl & CMD_CTRL_2PORT) == 0) {
2379 		printf("%s: %s ignored (disabled)\n",
2380 		    sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
2381 		return;
2382 	}
2383 	pciide_map_compat_intr(pa, cp, channel, interface);
2384 	if (cp->hw_ok == 0)
2385 		return;
2386 	pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize, cmd_pci_intr);
2387 	if (cp->hw_ok == 0) {
2388 		pciide_unmap_compat_intr(pa, cp, channel, interface);
2389 		return;
2390 	}
2391 	if (channel == 1) {
2392 		if (pciide_chan_candisable(cp)) {
2393 			ctrl &= ~CMD_CTRL_2PORT;
2394 			pciide_pci_write(pa->pa_pc, pa->pa_tag,
2395 			    CMD_CTRL, ctrl);
2396 			pciide_unmap_compat_intr(pa, cp, channel, interface);
2397 		}
2398 	}
2399 }
2400 
2401 int
2402 cmd_pci_intr(arg)
2403 	void *arg;
2404 {
2405 	struct pciide_softc *sc = arg;
2406 	struct pciide_channel *cp;
2407 	struct channel_softc *wdc_cp;
2408 	int i, rv, crv;
2409 	u_int32_t priirq, secirq;
2410 
2411 	rv = 0;
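	/*
	 * The interrupt status bits live in CMD_CONF for the primary
	 * channel and in CMD_ARTTIM23 for the secondary one.
	 */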
2412 	priirq = pciide_pci_read(sc->sc_pc, sc->sc_tag, CMD_CONF);
2413 	secirq = pciide_pci_read(sc->sc_pc, sc->sc_tag, CMD_ARTTIM23);
2414 	for (i = 0; i < sc->sc_wdcdev.nchannels; i++) {
2415 		cp = &sc->pciide_channels[i];
2416 		wdc_cp = &cp->wdc_channel;
		/* If a compat channel, skip. */
2418 		if (cp->compat)
2419 			continue;
2420 		if ((i == 0 && (priirq & CMD_CONF_DRV0_INTR)) ||
2421 		    (i == 1 && (secirq & CMD_ARTTIM23_IRQ))) {
2422 			crv = wdcintr(wdc_cp);
2423 			if (crv == 0)
2424 				printf("%s:%d: bogus intr\n",
2425 				    sc->sc_wdcdev.sc_dev.dv_xname, i);
2426 			else
2427 				rv = 1;
2428 		}
2429 	}
2430 	return rv;
2431 }
2432 
2433 void
2434 cmd_chip_map(sc, pa)
2435 	struct pciide_softc *sc;
2436 	struct pci_attach_args *pa;
2437 {
2438 	int channel;
2439 	pcireg_t interface = PCI_INTERFACE(pa->pa_class);
2440 	/*
2441  	 * For a CMD PCI064x, the use of PCI_COMMAND_IO_ENABLE
2442 	 * and base adresses registers can be disabled at
2443  	 * hardware level. In this case, the device is wired
2444 	 * in compat mode and its first channel is always enabled,
2445 	 * but we can't rely on PCI_COMMAND_IO_ENABLE.
2446 	 * In fact, it seems that the first channel of the CMD PCI0640
2447 	 * can't be disabled.
2448  	 */
2449 
2450 #ifdef PCIIDE_CMD064x_DISABLE
2451 	if (pciide_chipen(sc, pa) == 0)
2452 		return;
2453 #endif
2454 
2455 	printf(": no DMA");
2456 	sc->sc_dma_ok = 0;
2457 
2458 	sc->sc_wdcdev.channels = sc->wdc_chanarray;
2459 	sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
2460 	sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16;
2461 
2462 	pciide_print_channels(sc->sc_wdcdev.nchannels, interface);
2463 
2464 	for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
2465 		cmd_channel_map(pa, sc, channel);
2466 	}
2467 }
2468 
2469 void
2470 cmd0643_9_chip_map(sc, pa)
2471 	struct pciide_softc *sc;
2472 	struct pci_attach_args *pa;
2473 {
2474 	struct pciide_channel *cp;
2475 	int channel;
2476 	int rev = PCI_REVISION(
2477 	    pci_conf_read(sc->sc_pc, sc->sc_tag, PCI_CLASS_REG));
2478 	pcireg_t interface;
2479 
2480 	/*
2481 	 * The 0648/0649 can be told to identify as a RAID controller.
	 * In this case, we have to fake the interface.
2483 	 */
2484 	if (PCI_SUBCLASS(pa->pa_class) != PCI_SUBCLASS_MASS_STORAGE_IDE) {
2485 		interface = PCIIDE_INTERFACE_SETTABLE(0) |
2486 		    PCIIDE_INTERFACE_SETTABLE(1);
2487 		if (pciide_pci_read(pa->pa_pc, pa->pa_tag, CMD_CONF) &
2488 		    CMD_CONF_DSA1)
2489 			interface |= PCIIDE_INTERFACE_PCI(0) |
2490 			    PCIIDE_INTERFACE_PCI(1);
2491 	} else {
2492 		interface = PCI_INTERFACE(pa->pa_class);
2493 	}
2494 
2495 	/*
2496 	 * For a CMD PCI064x, the use of PCI_COMMAND_IO_ENABLE
2497 	 * and base adresses registers can be disabled at
2498 	 * hardware level. In this case, the device is wired
2499 	 * in compat mode and its first channel is always enabled,
2500  	 * but we can't rely on PCI_COMMAND_IO_ENABLE.
2501 	 * In fact, it seems that the first channel of the CMD PCI0640
2502 	 * can't be disabled.
2503 	*/
2504 
2505 #ifdef PCIIDE_CMD064x_DISABLE
2506 	if (pciide_chipen(sc, pa) == 0)
2507 		return;
2508 #endif
2509 	printf(": DMA");
2510 	pciide_mapreg_dma(sc, pa);
2511 	sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
2512 	    WDC_CAPABILITY_MODE;
2513 	if (sc->sc_dma_ok) {
2514 		sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_IRQACK;
2515 		switch (sc->sc_pp->ide_product) {
2516 		case PCI_PRODUCT_CMDTECH_649:
2517 		case PCI_PRODUCT_CMDTECH_648:
2518 			sc->sc_wdcdev.cap |= WDC_CAPABILITY_UDMA;
2519 			sc->sc_wdcdev.UDMA_cap = 4;
2520 			sc->sc_wdcdev.irqack = cmd646_9_irqack;
2521 			break;
2522                 case PCI_PRODUCT_CMDTECH_646:
2523                         if (rev >= CMD0646U2_REV) {
2524                                 sc->sc_wdcdev.cap |= WDC_CAPABILITY_UDMA;
2525                                 sc->sc_wdcdev.UDMA_cap = 2;
2526                         } else if (rev >= CMD0646U_REV) {
2527                         /*
2528                          * Linux's driver claims that the 646U is broken
2529                          * with UDMA. Only enable it if we know what we're
2530                          * doing
2531                          */
2532 #ifdef PCIIDE_CMD0646U_ENABLEUDMA
2533                                 sc->sc_wdcdev.cap |= WDC_CAPABILITY_UDMA;
2534                                 sc->sc_wdcdev.UDMA_cap = 2;
2535 #endif
                                /* explicitly disable UDMA */
2537                                 pciide_pci_write(sc->sc_pc, sc->sc_tag,
2538                                     CMD_UDMATIM(0), 0);
2539                                 pciide_pci_write(sc->sc_pc, sc->sc_tag,
2540                                     CMD_UDMATIM(1), 0);
2541                         }
2542                         sc->sc_wdcdev.irqack = cmd646_9_irqack;
2543                         break;
2544 		default:
2545 			sc->sc_wdcdev.irqack = pciide_irqack;
2546 		}
2547 	}
2548 
2549 	sc->sc_wdcdev.channels = sc->wdc_chanarray;
2550 	sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
2551 	sc->sc_wdcdev.PIO_cap = 4;
2552 	sc->sc_wdcdev.DMA_cap = 2;
2553 	sc->sc_wdcdev.set_modes = cmd0643_9_setup_channel;
2554 
2555 	pciide_print_channels(sc->sc_wdcdev.nchannels, interface);
2556 
2557 	WDCDEBUG_PRINT(("cmd0643_9_chip_map: old timings reg 0x%x 0x%x\n",
2558 		pci_conf_read(sc->sc_pc, sc->sc_tag, 0x54),
2559 		pci_conf_read(sc->sc_pc, sc->sc_tag, 0x58)),
2560 		DEBUG_PROBE);
2561 	for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
2562 		cp = &sc->pciide_channels[channel];
2563 		cmd_channel_map(pa, sc, channel);
2564 		if (cp->hw_ok == 0)
2565 			continue;
2566 		cmd0643_9_setup_channel(&cp->wdc_channel);
2567 	}
2568 	/*
	 * Note: this also makes sure we clear the IRQ disable and reset
	 * bits.
2571 	 */
2572 	pciide_pci_write(sc->sc_pc, sc->sc_tag, CMD_DMA_MODE, CMD_DMA_MULTIPLE);
2573 	WDCDEBUG_PRINT(("cmd0643_9_chip_map: timings reg now 0x%x 0x%x\n",
2574 	    pci_conf_read(sc->sc_pc, sc->sc_tag, 0x54),
2575 	    pci_conf_read(sc->sc_pc, sc->sc_tag, 0x58)),
2576 	    DEBUG_PROBE);
2577 }
2578 
2579 void
2580 cmd0643_9_setup_channel(chp)
2581 	struct channel_softc *chp;
2582 {
2583 	struct ata_drive_datas *drvp;
2584 	u_int8_t tim;
2585 	u_int32_t idedma_ctl, udma_reg;
2586 	int drive;
2587 	struct pciide_channel *cp = (struct pciide_channel*)chp;
2588 	struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
2589 
2590 	idedma_ctl = 0;
2591 	/* setup DMA if needed */
2592 	pciide_channel_dma_setup(cp);
2593 
2594 	for (drive = 0; drive < 2; drive++) {
2595 		drvp = &chp->ch_drive[drive];
2596 		/* If no drive, skip */
2597 		if ((drvp->drive_flags & DRIVE) == 0)
2598 			continue;
2599 		/* add timing values, setup DMA if needed */
2600 		tim = cmd0643_9_data_tim_pio[drvp->PIO_mode];
2601 		if (drvp->drive_flags & (DRIVE_DMA | DRIVE_UDMA)) {
2602 			if (drvp->drive_flags & DRIVE_UDMA) {
2603 				/* UltraDMA on a 646U2, 0648 or 0649 */
2604 				drvp->drive_flags &= ~DRIVE_DMA;
2605 				udma_reg = pciide_pci_read(sc->sc_pc,
2606 				    sc->sc_tag, CMD_UDMATIM(chp->channel));
2607 				if (drvp->UDMA_mode > 2 &&
2608 				    (pciide_pci_read(sc->sc_pc, sc->sc_tag,
2609 				    CMD_BICSR) &
2610 				    CMD_BICSR_80(chp->channel)) == 0)
2611 					drvp->UDMA_mode = 2;
2612 				if (drvp->UDMA_mode > 2)
2613 					udma_reg &= ~CMD_UDMATIM_UDMA33(drive);
2614 				else if (sc->sc_wdcdev.UDMA_cap > 2)
2615 					udma_reg |= CMD_UDMATIM_UDMA33(drive);
2616 				udma_reg |= CMD_UDMATIM_UDMA(drive);
2617 				udma_reg &= ~(CMD_UDMATIM_TIM_MASK <<
2618 				    CMD_UDMATIM_TIM_OFF(drive));
2619 				udma_reg |=
2620 				    (cmd0646_9_tim_udma[drvp->UDMA_mode] <<
2621 				    CMD_UDMATIM_TIM_OFF(drive));
2622 				pciide_pci_write(sc->sc_pc, sc->sc_tag,
2623 				    CMD_UDMATIM(chp->channel), udma_reg);
2624 			} else {
2625 				/*
2626 				 * use Multiword DMA.
2627 				 * Timings will be used for both PIO and DMA,
2628 				 * so adjust DMA mode if needed
2629 				 * if we have a 0646U2/8/9, turn off UDMA
2630 				 */
2631 				if (sc->sc_wdcdev.cap & WDC_CAPABILITY_UDMA) {
2632 					udma_reg = pciide_pci_read(sc->sc_pc,
2633 					    sc->sc_tag,
2634 					    CMD_UDMATIM(chp->channel));
2635 					udma_reg &= ~CMD_UDMATIM_UDMA(drive);
2636 					pciide_pci_write(sc->sc_pc, sc->sc_tag,
2637 					    CMD_UDMATIM(chp->channel),
2638 					    udma_reg);
2639 				}
2640 				if (drvp->PIO_mode >= 3 &&
2641 				    (drvp->DMA_mode + 2) > drvp->PIO_mode) {
2642 					drvp->DMA_mode = drvp->PIO_mode - 2;
2643 				}
2644 				tim = cmd0643_9_data_tim_dma[drvp->DMA_mode];
2645 			}
2646 			idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
2647 		}
2648 		pciide_pci_write(sc->sc_pc, sc->sc_tag,
2649 		    CMD_DATA_TIM(chp->channel, drive), tim);
2650 	}
2651 	if (idedma_ctl != 0) {
2652 		/* Add software bits in status register */
2653 		bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
2654 		    IDEDMA_CTL + (IDEDMA_SCH_OFFSET * chp->channel),
2655 		    idedma_ctl);
2656 	}
2657 	pciide_print_modes(cp);
2658 }
2659 
2660 void
2661 cmd646_9_irqack(chp)
2662         struct channel_softc *chp;
2663 {
2664         u_int32_t priirq, secirq;
2665         struct pciide_channel *cp = (struct pciide_channel*)chp;
2666         struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
2667 
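        /*
         * Write the just-read value back to acknowledge (clear) the
         * pending interrupt bit for this channel.
         */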
2668         if (chp->channel == 0) {
2669                 priirq = pciide_pci_read(sc->sc_pc, sc->sc_tag, CMD_CONF);
2670                 pciide_pci_write(sc->sc_pc, sc->sc_tag, CMD_CONF, priirq);
2671         } else {
2672                 secirq = pciide_pci_read(sc->sc_pc, sc->sc_tag, CMD_ARTTIM23);
2673                 pciide_pci_write(sc->sc_pc, sc->sc_tag, CMD_ARTTIM23, secirq);
2674         }
2675         pciide_irqack(chp);
2676 }
2677 
2678 void
2679 cy693_chip_map(sc, pa)
2680 	struct pciide_softc *sc;
2681 	struct pci_attach_args *pa;
2682 {
2683 	struct pciide_channel *cp;
2684 	pcireg_t interface = PCI_INTERFACE(pa->pa_class);
2685 	bus_size_t cmdsize, ctlsize;
2686 
2687 	if (pciide_chipen(sc, pa) == 0)
2688 		return;
2689 	/*
	 * This chip has 2 PCI IDE functions, one for primary and one for
	 * secondary, so we need to call pciide_mapregs_compat() with
	 * the real channel.
2693 	 */
2694 	if (pa->pa_function == 1) {
2695 		sc->sc_cy_compatchan = 0;
2696 	} else if (pa->pa_function == 2) {
2697 		sc->sc_cy_compatchan = 1;
2698 	} else {
2699 		printf(": unexpected PCI function %d\n", pa->pa_function);
2700 		return;
2701 	}
2702 
2703 	if (interface & PCIIDE_INTERFACE_BUS_MASTER_DMA) {
2704 		printf(": DMA");
2705 		pciide_mapreg_dma(sc, pa);
2706 	} else {
2707 		printf(": no DMA");
2708 		sc->sc_dma_ok = 0;
2709 	}
2710 
2711 	sc->sc_cy_handle = cy82c693_init(pa->pa_iot);
2712 	if (sc->sc_cy_handle == NULL) {
2713 		printf(", (unable to map ctl registers)");
2714 		sc->sc_dma_ok = 0;
2715 	}
2716 
2717 	sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
2718 	    WDC_CAPABILITY_MODE;
2719 	if (sc->sc_dma_ok) {
2720 		sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_IRQACK;
2721 		sc->sc_wdcdev.irqack = pciide_irqack;
2722 	}
2723 	sc->sc_wdcdev.PIO_cap = 4;
2724 	sc->sc_wdcdev.DMA_cap = 2;
2725 	sc->sc_wdcdev.set_modes = cy693_setup_channel;
2726 
2727 	sc->sc_wdcdev.channels = sc->wdc_chanarray;
2728 	sc->sc_wdcdev.nchannels = 1;
2729 
2730 	/* Only one channel for this chip; if we are here it's enabled */
2731 	cp = &sc->pciide_channels[0];
2732 	sc->wdc_chanarray[0] = &cp->wdc_channel;
2733 	cp->name = PCIIDE_CHANNEL_NAME(0);
2734 	cp->wdc_channel.channel = 0;
2735 	cp->wdc_channel.wdc = &sc->sc_wdcdev;
2736 	cp->wdc_channel.ch_queue =
2737 	    malloc(sizeof(struct channel_queue), M_DEVBUF, M_NOWAIT);
2738 	if (cp->wdc_channel.ch_queue == NULL) {
2739 		printf(": cannot allocate memory for command queue\n");
2740 		return;
2741 	}
2742 	printf(", %s %s to ", PCIIDE_CHANNEL_NAME(0),
2743 	    (interface & PCIIDE_INTERFACE_SETTABLE(0)) ?
2744 	    "configured" : "wired");
2745 	if (interface & PCIIDE_INTERFACE_PCI(0)) {
2746 		printf("native-PCI\n");
2747 		cp->hw_ok = pciide_mapregs_native(pa, cp, &cmdsize, &ctlsize,
2748 		    pciide_pci_intr);
2749 	} else {
2750 		printf("compatibility\n");
2751 		cp->hw_ok = pciide_mapregs_compat(pa, cp, sc->sc_cy_compatchan,
2752 		    &cmdsize, &ctlsize);
2753 	}
2754 
2755 	cp->wdc_channel.data32iot = cp->wdc_channel.cmd_iot;
2756 	cp->wdc_channel.data32ioh = cp->wdc_channel.cmd_ioh;
2757 	pciide_map_compat_intr(pa, cp, sc->sc_cy_compatchan, interface);
2758 	if (cp->hw_ok == 0)
2759 		return;
2760 	wdcattach(&cp->wdc_channel);
2761 	if (pciide_chan_candisable(cp)) {
2762 		pci_conf_write(sc->sc_pc, sc->sc_tag,
2763 		    PCI_COMMAND_STATUS_REG, 0);
2764 	}
2765 	if (cp->hw_ok == 0) {
2766 		pciide_unmap_compat_intr(pa, cp, sc->sc_cy_compatchan,
2767 		    interface);
2768 		return;
2769 	}
2770 
2771 	WDCDEBUG_PRINT(("cy693_chip_map: old timings reg 0x%x\n",
2772 	    pci_conf_read(sc->sc_pc, sc->sc_tag, CY_CMD_CTRL)),DEBUG_PROBE);
2773 	cy693_setup_channel(&cp->wdc_channel);
2774 	WDCDEBUG_PRINT(("cy693_chip_map: new timings reg 0x%x\n",
2775 	    pci_conf_read(sc->sc_pc, sc->sc_tag, CY_CMD_CTRL)), DEBUG_PROBE);
2776 }
2777 
2778 void
2779 cy693_setup_channel(chp)
2780 	struct channel_softc *chp;
2781 {
2782 	struct ata_drive_datas *drvp;
2783 	int drive;
2784 	u_int32_t cy_cmd_ctrl;
2785 	u_int32_t idedma_ctl;
2786 	struct pciide_channel *cp = (struct pciide_channel*)chp;
2787 	struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
2788 	int dma_mode = -1;
2789 
2790 	cy_cmd_ctrl = idedma_ctl = 0;
2791 
2792 	/* setup DMA if needed */
2793 	pciide_channel_dma_setup(cp);
2794 
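	/*
	 * The chip has a single DMA timing setting per channel, so the
	 * lowest DMA mode of the two drives is used for both.
	 */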
2795 	for (drive = 0; drive < 2; drive++) {
2796 		drvp = &chp->ch_drive[drive];
2797 		/* If no drive, skip */
2798 		if ((drvp->drive_flags & DRIVE) == 0)
2799 			continue;
2800 		/* add timing values, setup DMA if needed */
2801 		if (drvp->drive_flags & DRIVE_DMA) {
2802 			idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
2803 			/* use Multiword DMA */
2804 			if (dma_mode == -1 || dma_mode > drvp->DMA_mode)
2805 				dma_mode = drvp->DMA_mode;
2806 		}
2807 		cy_cmd_ctrl |= (cy_pio_pulse[drvp->PIO_mode] <<
2808 		    CY_CMD_CTRL_IOW_PULSE_OFF(drive));
2809 		cy_cmd_ctrl |= (cy_pio_rec[drvp->PIO_mode] <<
2810 		    CY_CMD_CTRL_IOW_REC_OFF(drive));
2811 		cy_cmd_ctrl |= (cy_pio_pulse[drvp->PIO_mode] <<
2812 		    CY_CMD_CTRL_IOR_PULSE_OFF(drive));
2813 		cy_cmd_ctrl |= (cy_pio_rec[drvp->PIO_mode] <<
2814 		    CY_CMD_CTRL_IOR_REC_OFF(drive));
2815 	}
2816 	pci_conf_write(sc->sc_pc, sc->sc_tag, CY_CMD_CTRL, cy_cmd_ctrl);
2817 	chp->ch_drive[0].DMA_mode = dma_mode;
2818 	chp->ch_drive[1].DMA_mode = dma_mode;
2819 
2820 	if (dma_mode == -1)
2821 		dma_mode = 0;
2822 
2823 	if (sc->sc_cy_handle != NULL) {
2824 		/* Note: `multiple' is implied. */
2825 		cy82c693_write(sc->sc_cy_handle,
2826 		    (sc->sc_cy_compatchan == 0) ?
2827 		    CY_DMA_IDX_PRIMARY : CY_DMA_IDX_SECONDARY, dma_mode);
2828 	}
2829 
2830 	pciide_print_modes(cp);
2831 
2832 	if (idedma_ctl != 0) {
2833 		/* Add software bits in status register */
2834 		bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
2835 		    IDEDMA_CTL, idedma_ctl);
2836 	}
2837 }
2838 
2839 void
2840 sis_chip_map(sc, pa)
2841 	struct pciide_softc *sc;
2842 	struct pci_attach_args *pa;
2843 {
2844 	struct pciide_channel *cp;
2845 	int channel;
2846 	u_int8_t sis_ctr0 = pciide_pci_read(sc->sc_pc, sc->sc_tag, SIS_CTRL0);
2847 	pcireg_t interface = PCI_INTERFACE(pa->pa_class);
2848 	pcireg_t rev = PCI_REVISION(pa->pa_class);
2849 	bus_size_t cmdsize, ctlsize;
2850 	pcitag_t pchb_tag;
2851 	pcireg_t pchb_id, pchb_class;
2852 
2853 	if (pciide_chipen(sc, pa) == 0)
2854 		return;
2855 
2856 	printf(": DMA");
2857 	pciide_mapreg_dma(sc, pa);
2858 
2859 	/* get a PCI tag for the host bridge (function 0 of the same device) */
2860 	pchb_tag = pci_make_tag(pa->pa_pc, pa->pa_bus, pa->pa_device, 0);
	/* and read the ID and revision of the host bridge */
2862 	pchb_id = pci_conf_read(sc->sc_pc, pchb_tag, PCI_ID_REG);
2863 	pchb_class = pci_conf_read(sc->sc_pc, pchb_tag, PCI_CLASS_REG);
2864 
2865 	sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
2866 	    WDC_CAPABILITY_MODE;
2867 	if (sc->sc_dma_ok) {
2868 		sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_IRQACK;
2869 		sc->sc_wdcdev.irqack = pciide_irqack;
2870 		/*
		 * Controllers with rev >= 0xd0 support at least UDMA 2, but
		 * controllers paired with a rev <= 0x02 SiS 530 host-to-PCI
		 * bridge have problems with UDMA.
2874 		 */
2875 		if (rev >= 0xd0 &&
2876 		    (PCI_PRODUCT(pchb_id) != PCI_PRODUCT_SIS_SiS530 ||
2877 		    PCI_REVISION(pchb_class) >= 0x03))
2878 			sc->sc_wdcdev.cap |= WDC_CAPABILITY_UDMA;
2879 	}
2880 
2881 	sc->sc_wdcdev.PIO_cap = 4;
2882 	sc->sc_wdcdev.DMA_cap = 2;
2883 	if (sc->sc_wdcdev.cap & WDC_CAPABILITY_UDMA)
2884 		sc->sc_wdcdev.UDMA_cap = 2;
2885 	sc->sc_wdcdev.set_modes = sis_setup_channel;
2886 
2887 	sc->sc_wdcdev.channels = sc->wdc_chanarray;
2888 	sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
2889 
2890 	pciide_print_channels(sc->sc_wdcdev.nchannels, interface);
2891 
2892 	pciide_pci_write(sc->sc_pc, sc->sc_tag, SIS_MISC,
2893 	    pciide_pci_read(sc->sc_pc, sc->sc_tag, SIS_MISC) |
2894 	    SIS_MISC_TIM_SEL | SIS_MISC_FIFO_SIZE);
2895 
2896 	for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
2897 		cp = &sc->pciide_channels[channel];
2898 		if (pciide_chansetup(sc, channel, interface) == 0)
2899 			continue;
2900 		if ((channel == 0 && (sis_ctr0 & SIS_CTRL0_CHAN0_EN) == 0) ||
2901 	 	    (channel == 1 && (sis_ctr0 & SIS_CTRL0_CHAN1_EN) == 0)) {
2902 			printf("%s: %s ignored (disabled)\n",
2903 			    sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
2904 			continue;
2905 		}
2906 		pciide_map_compat_intr(pa, cp, channel, interface);
2907 		if (cp->hw_ok == 0)
2908 			continue;
2909 		pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize,
2910 		    pciide_pci_intr);
2911 		if (cp->hw_ok == 0) {
2912 			pciide_unmap_compat_intr(pa, cp, channel, interface);
2913 			continue;
2914 		}
2915 		if (pciide_chan_candisable(cp)) {
2916 			if (channel == 0)
2917 				sis_ctr0 &= ~SIS_CTRL0_CHAN0_EN;
2918 			else
2919 				sis_ctr0 &= ~SIS_CTRL0_CHAN1_EN;
2920 			pciide_pci_write(sc->sc_pc, sc->sc_tag, SIS_CTRL0,
2921 			    sis_ctr0);
2922 		}
2923 		if (cp->hw_ok == 0) {
2924 			pciide_unmap_compat_intr(pa, cp, channel, interface);
2925 			continue;
2926 		}
2927 		sis_setup_channel(&cp->wdc_channel);
2928 	}
2929 }
2930 
2931 void
2932 sis_setup_channel(chp)
2933 	struct channel_softc *chp;
2934 {
2935 	struct ata_drive_datas *drvp;
2936 	int drive;
2937 	u_int32_t sis_tim;
2938 	u_int32_t idedma_ctl;
2939 	struct pciide_channel *cp = (struct pciide_channel*)chp;
2940 	struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
2941 
2942 	WDCDEBUG_PRINT(("sis_setup_channel: old timings reg for "
2943 	    "channel %d 0x%x\n", chp->channel,
2944 	    pci_conf_read(sc->sc_pc, sc->sc_tag, SIS_TIM(chp->channel))),
2945 	    DEBUG_PROBE);
2946 	sis_tim = 0;
2947 	idedma_ctl = 0;
2948 	/* setup DMA if needed */
2949 	pciide_channel_dma_setup(cp);
2950 
2951 	for (drive = 0; drive < 2; drive++) {
2952 		drvp = &chp->ch_drive[drive];
2953 		/* If no drive, skip */
2954 		if ((drvp->drive_flags & DRIVE) == 0)
2955 			continue;
2956 		/* add timing values, setup DMA if needed */
2957 		if ((drvp->drive_flags & DRIVE_DMA) == 0 &&
2958 		    (drvp->drive_flags & DRIVE_UDMA) == 0)
2959 			goto pio;
2960 
2961 		if (drvp->drive_flags & DRIVE_UDMA) {
2962 			/* use Ultra/DMA */
2963 			drvp->drive_flags &= ~DRIVE_DMA;
2964 			sis_tim |= sis_udma_tim[drvp->UDMA_mode] <<
2965 			    SIS_TIM_UDMA_TIME_OFF(drive);
2966 			sis_tim |= SIS_TIM_UDMA_EN(drive);
2967 		} else {
2968 			/*
2969 			 * use Multiword DMA
2970 			 * Timings will be used for both PIO and DMA,
2971 			 * so adjust DMA mode if needed
2972 			 */
2973 			if (drvp->PIO_mode > (drvp->DMA_mode + 2))
2974 				drvp->PIO_mode = drvp->DMA_mode + 2;
2975 			if (drvp->DMA_mode + 2 > (drvp->PIO_mode))
2976 				drvp->DMA_mode = (drvp->PIO_mode > 2) ?
2977 				    drvp->PIO_mode - 2 : 0;
2978 			if (drvp->DMA_mode == 0)
2979 				drvp->PIO_mode = 0;
2980 		}
2981 		idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
2982 pio:		sis_tim |= sis_pio_act[drvp->PIO_mode] <<
2983 		    SIS_TIM_ACT_OFF(drive);
2984 		sis_tim |= sis_pio_rec[drvp->PIO_mode] <<
2985 		    SIS_TIM_REC_OFF(drive);
2986 	}
2987 	WDCDEBUG_PRINT(("sis_setup_channel: new timings reg for "
2988 	    "channel %d 0x%x\n", chp->channel, sis_tim), DEBUG_PROBE);
2989 	pci_conf_write(sc->sc_pc, sc->sc_tag, SIS_TIM(chp->channel), sis_tim);
2990 	if (idedma_ctl != 0) {
2991 		/* Add software bits in status register */
2992 		bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
2993 		    IDEDMA_CTL, idedma_ctl);
2994 	}
2995 	pciide_print_modes(cp);
2996 }
2997 
2998 void
2999 acer_chip_map(sc, pa)
3000 	struct pciide_softc *sc;
3001 	struct pci_attach_args *pa;
3002 {
3003 	struct pciide_channel *cp;
3004 	int channel;
3005 	pcireg_t cr, interface;
3006 	bus_size_t cmdsize, ctlsize;
3007 	pcireg_t rev = PCI_REVISION(pa->pa_class);
3008 
3009 	if (pciide_chipen(sc, pa) == 0)
3010 		return;
3011 
3012 	printf(": DMA");
3013 	pciide_mapreg_dma(sc, pa);
3014 	sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
3015 	    WDC_CAPABILITY_MODE;
3016 
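	/* revisions older than 0xC4 can't do DMA to ATAPI devices */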
3017 	if (rev < 0xC4)
3018 		sc->sc_wdcdev.cap |= WDC_CAPABILITY_NO_ATAPI_DMA;
3019 
3020 	if (sc->sc_dma_ok) {
3021 		sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA;
3022 		if (rev >= 0x20) {
3023 			sc->sc_wdcdev.cap |= WDC_CAPABILITY_UDMA;
3024 			if (rev >= 0xC4)
3025 				sc->sc_wdcdev.UDMA_cap = 5;
3026 			else if (rev >= 0xC2)
3027 				sc->sc_wdcdev.UDMA_cap = 4;
3028 			else
3029 				sc->sc_wdcdev.UDMA_cap = 2;
3030 		}
3031 		sc->sc_wdcdev.cap |= WDC_CAPABILITY_IRQACK;
3032 		sc->sc_wdcdev.irqack = pciide_irqack;
3033 	}
3034 
3035 	sc->sc_wdcdev.PIO_cap = 4;
3036 	sc->sc_wdcdev.DMA_cap = 2;
3037 	sc->sc_wdcdev.set_modes = acer_setup_channel;
3038 	sc->sc_wdcdev.channels = sc->wdc_chanarray;
3039 	sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
3040 
3041 	pciide_pci_write(sc->sc_pc, sc->sc_tag, ACER_CDRC,
3042 	    (pciide_pci_read(sc->sc_pc, sc->sc_tag, ACER_CDRC) |
3043 		ACER_CDRC_DMA_EN) & ~ACER_CDRC_FIFO_DISABLE);
3044 
3045 	/* Enable "microsoft register bits" R/W. */
3046 	pciide_pci_write(sc->sc_pc, sc->sc_tag, ACER_CCAR3,
3047 	    pciide_pci_read(sc->sc_pc, sc->sc_tag, ACER_CCAR3) | ACER_CCAR3_PI);
3048 	pciide_pci_write(sc->sc_pc, sc->sc_tag, ACER_CCAR1,
3049 	    pciide_pci_read(sc->sc_pc, sc->sc_tag, ACER_CCAR1) &
3050 	    ~(ACER_CHANSTATUS_RO|PCIIDE_CHAN_RO(0)|PCIIDE_CHAN_RO(1)));
3051 	pciide_pci_write(sc->sc_pc, sc->sc_tag, ACER_CCAR2,
3052 	    pciide_pci_read(sc->sc_pc, sc->sc_tag, ACER_CCAR2) &
3053 	    ~ACER_CHANSTATUSREGS_RO);
3054 	cr = pci_conf_read(sc->sc_pc, sc->sc_tag, PCI_CLASS_REG);
3055 	cr |= (PCIIDE_CHANSTATUS_EN << PCI_INTERFACE_SHIFT);
3056 	pci_conf_write(sc->sc_pc, sc->sc_tag, PCI_CLASS_REG, cr);
3057 	/* Don't use cr, re-read the real register content instead */
3058 	interface = PCI_INTERFACE(pci_conf_read(sc->sc_pc, sc->sc_tag,
3059 	    PCI_CLASS_REG));
3060 
3061 	pciide_print_channels(sc->sc_wdcdev.nchannels, interface);
3062 
3063 	/* From linux: enable "Cable Detection" */
3064 	if (rev >= 0xC2) {
3065 		pciide_pci_write(sc->sc_pc, sc->sc_tag, ACER_0x4B,
3066 		    pciide_pci_read(sc->sc_pc, sc->sc_tag, ACER_0x4B)
3067 		    | ACER_0x4B_CDETECT);
3068 		/* set south-bridge's enable bit, m1533, 0x79 */
3069 		if (rev == 0xC2)
3070 			/* 1543C-B0 (m1533, 0x79, bit 2) */
3071 			pciide_pci_write(sc->sc_pc, sc->sc_tag, ACER_0x79,
3072 			    pciide_pci_read(sc->sc_pc, sc->sc_tag, ACER_0x79)
3073 			    | ACER_0x79_REVC2_EN);
3074 		else
3075 			/* 1553/1535 (m1533, 0x79, bit 1) */
3076 			pciide_pci_write(sc->sc_pc, sc->sc_tag, ACER_0x79,
3077 			    pciide_pci_read(sc->sc_pc, sc->sc_tag, ACER_0x79)
3078 			    | ACER_0x79_EN);
3079 	}
3080 
3081 	for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
3082 		cp = &sc->pciide_channels[channel];
3083 		if (pciide_chansetup(sc, channel, interface) == 0)
3084 			continue;
3085 		if ((interface & PCIIDE_CHAN_EN(channel)) == 0) {
3086 			printf("%s: %s ignored (disabled)\n",
3087 			    sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
3088 			continue;
3089 		}
3090 		pciide_map_compat_intr(pa, cp, channel, interface);
3091 		if (cp->hw_ok == 0)
3092 			continue;
3093 		pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize,
3094 		    (rev >= 0xC2) ? pciide_pci_intr : acer_pci_intr);
3095 		if (cp->hw_ok == 0) {
3096 			pciide_unmap_compat_intr(pa, cp, channel, interface);
3097 			continue;
3098 		}
3099 		if (pciide_chan_candisable(cp)) {
3100 			cr &= ~(PCIIDE_CHAN_EN(channel) << PCI_INTERFACE_SHIFT);
3101 			pci_conf_write(sc->sc_pc, sc->sc_tag,
3102 			    PCI_CLASS_REG, cr);
3103 		}
3104 		if (cp->hw_ok == 0) {
3105 			pciide_unmap_compat_intr(pa, cp, channel, interface);
3106 			continue;
3107 		}
3108 		acer_setup_channel(&cp->wdc_channel);
3109 	}
3110 }
3111 
3112 void
3113 acer_setup_channel(chp)
3114 	struct channel_softc *chp;
3115 {
3116 	struct ata_drive_datas *drvp;
3117 	int drive;
3118 	u_int32_t acer_fifo_udma;
3119 	u_int32_t idedma_ctl;
3120 	struct pciide_channel *cp = (struct pciide_channel*)chp;
3121 	struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
3122 
3123 	idedma_ctl = 0;
3124 	acer_fifo_udma = pci_conf_read(sc->sc_pc, sc->sc_tag, ACER_FTH_UDMA);
3125 	WDCDEBUG_PRINT(("acer_setup_channel: old fifo/udma reg 0x%x\n",
3126 	    acer_fifo_udma), DEBUG_PROBE);
3127 	/* setup DMA if needed */
3128 	pciide_channel_dma_setup(cp);
3129 
3130 	if ((chp->ch_drive[0].drive_flags | chp->ch_drive[1].drive_flags) &
3131 	    DRIVE_UDMA)	{	/* check 80 pins cable */
3132 		if (pciide_pci_read(sc->sc_pc, sc->sc_tag, ACER_0x4A) &
3133 		    ACER_0x4A_80PIN(chp->channel)) {
3134 			if (chp->ch_drive[0].UDMA_mode > 2)
3135 				chp->ch_drive[0].UDMA_mode = 2;
3136 			if (chp->ch_drive[1].UDMA_mode > 2)
3137 				chp->ch_drive[1].UDMA_mode = 2;
3138 		}
3139 	}
3140 
3141 	for (drive = 0; drive < 2; drive++) {
3142 		drvp = &chp->ch_drive[drive];
3143 		/* If no drive, skip */
3144 		if ((drvp->drive_flags & DRIVE) == 0)
3145 			continue;
3146 		WDCDEBUG_PRINT(("acer_setup_channel: old timings reg for "
3147 		    "channel %d drive %d 0x%x\n", chp->channel, drive,
3148 		    pciide_pci_read(sc->sc_pc, sc->sc_tag,
3149 		    ACER_IDETIM(chp->channel, drive))), DEBUG_PROBE);
3150 		/* clear FIFO/DMA mode */
3151 		acer_fifo_udma &= ~(ACER_FTH_OPL(chp->channel, drive, 0x3) |
3152 		    ACER_UDMA_EN(chp->channel, drive) |
3153 		    ACER_UDMA_TIM(chp->channel, drive, 0x7));
3154 
3155 		/* add timing values, setup DMA if needed */
3156 		if ((drvp->drive_flags & DRIVE_DMA) == 0 &&
3157 		    (drvp->drive_flags & DRIVE_UDMA) == 0) {
3158 			acer_fifo_udma |=
3159 			    ACER_FTH_OPL(chp->channel, drive, 0x1);
3160 			goto pio;
3161 		}
3162 
3163 		acer_fifo_udma |= ACER_FTH_OPL(chp->channel, drive, 0x2);
3164 		if (drvp->drive_flags & DRIVE_UDMA) {
3165 			/* use Ultra/DMA */
3166 			drvp->drive_flags &= ~DRIVE_DMA;
3167 			acer_fifo_udma |= ACER_UDMA_EN(chp->channel, drive);
3168 			acer_fifo_udma |=
3169 			    ACER_UDMA_TIM(chp->channel, drive,
3170 				acer_udma[drvp->UDMA_mode]);
3171 			/* XXX disable if one drive < UDMA3 ? */
3172 			if (drvp->UDMA_mode >= 3) {
3173 				pciide_pci_write(sc->sc_pc, sc->sc_tag,
3174 				    ACER_0x4B,
3175 				    pciide_pci_read(sc->sc_pc, sc->sc_tag,
3176 				        ACER_0x4B) | ACER_0x4B_UDMA66);
3177 			}
3178 		} else {
3179 			/*
3180 			 * use Multiword DMA
3181 			 * Timings will be used for both PIO and DMA,
3182 			 * so adjust DMA mode if needed
3183 			 */
3184 			if (drvp->PIO_mode > (drvp->DMA_mode + 2))
3185 				drvp->PIO_mode = drvp->DMA_mode + 2;
3186 			if (drvp->DMA_mode + 2 > (drvp->PIO_mode))
3187 				drvp->DMA_mode = (drvp->PIO_mode > 2) ?
3188 				    drvp->PIO_mode - 2 : 0;
3189 			if (drvp->DMA_mode == 0)
3190 				drvp->PIO_mode = 0;
3191 		}
3192 		idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
3193 pio:		pciide_pci_write(sc->sc_pc, sc->sc_tag,
3194 		    ACER_IDETIM(chp->channel, drive),
3195 		    acer_pio[drvp->PIO_mode]);
3196 	}
3197 	WDCDEBUG_PRINT(("acer_setup_channel: new fifo/udma reg 0x%x\n",
3198 	    acer_fifo_udma), DEBUG_PROBE);
3199 	pci_conf_write(sc->sc_pc, sc->sc_tag, ACER_FTH_UDMA, acer_fifo_udma);
3200 	if (idedma_ctl != 0) {
3201 		/* Add software bits in status register */
3202 		bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
3203 		    IDEDMA_CTL, idedma_ctl);
3204 	}
3205 	pciide_print_modes(cp);
3206 }
3207 
3208 int
3209 acer_pci_intr(arg)
3210 	void *arg;
3211 {
3212 	struct pciide_softc *sc = arg;
3213 	struct pciide_channel *cp;
3214 	struct channel_softc *wdc_cp;
3215 	int i, rv, crv;
3216 	u_int32_t chids;
3217 
3218 	rv = 0;
3219 	chids = pciide_pci_read(sc->sc_pc, sc->sc_tag, ACER_CHIDS);
3220 	for (i = 0; i < sc->sc_wdcdev.nchannels; i++) {
3221 		cp = &sc->pciide_channels[i];
3222 		wdc_cp = &cp->wdc_channel;
		/* If a compat channel, skip. */
3224 		if (cp->compat)
3225 			continue;
3226 		if (chids & ACER_CHIDS_INT(i)) {
3227 			crv = wdcintr(wdc_cp);
3228 			if (crv == 0)
3229 				printf("%s:%d: bogus intr\n",
3230 				    sc->sc_wdcdev.sc_dev.dv_xname, i);
3231 			else
3232 				rv = 1;
3233 		}
3234 	}
3235 	return rv;
3236 }
3237 
3238 void
3239 hpt_chip_map(sc, pa)
3240 	struct pciide_softc *sc;
3241 	struct pci_attach_args *pa;
3242 {
3243 	struct pciide_channel *cp;
3244 	int i, compatchan, revision;
3245 	pcireg_t interface;
3246 	bus_size_t cmdsize, ctlsize;
3247 
3248 	if (pciide_chipen(sc, pa) == 0)
3249 		return;
3250 	revision = PCI_REVISION(pa->pa_class);
3251 
3252 	/*
	 * When the chip is in native mode it identifies itself as a
	 * 'misc mass storage' device. Fake the interface in this case.
3255 	 */
3256 	if (PCI_SUBCLASS(pa->pa_class) == PCI_SUBCLASS_MASS_STORAGE_IDE) {
3257 		interface = PCI_INTERFACE(pa->pa_class);
3258 	} else {
3259 		interface = PCIIDE_INTERFACE_BUS_MASTER_DMA |
3260 		    PCIIDE_INTERFACE_PCI(0);
3261 		if (revision == HPT370_REV || revision == HPT370A_REV)
3262 			interface |= PCIIDE_INTERFACE_PCI(1);
3263 	}
3264 
3265 	printf(": DMA");
3266 	pciide_mapreg_dma(sc, pa);
3267 	printf("\n");
3268 	sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
3269 	    WDC_CAPABILITY_MODE;
3270 	if (sc->sc_dma_ok) {
3271 		sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_UDMA;
3272 		sc->sc_wdcdev.cap |= WDC_CAPABILITY_IRQACK;
3273 		sc->sc_wdcdev.irqack = pciide_irqack;
3274 	}
3275 	sc->sc_wdcdev.PIO_cap = 4;
3276 	sc->sc_wdcdev.DMA_cap = 2;
3277 
3278 	sc->sc_wdcdev.set_modes = hpt_setup_channel;
3279 	sc->sc_wdcdev.channels = sc->wdc_chanarray;
3280 	if (revision < HPT370_REV) {
3281 		sc->sc_wdcdev.UDMA_cap = 4;
3282 		/*
3283 		 * The 366 has 2 PCI IDE functions, one for primary and one
3284 		 * for secondary. So we need to call pciide_mapregs_compat()
3285 		 * with the real channel
3286 		 */
3287 		if (pa->pa_function == 0) {
3288 			compatchan = 0;
3289 		} else if (pa->pa_function == 1) {
3290 			compatchan = 1;
3291 		} else {
3292 			printf("%s: unexpected PCI function %d\n",
3293 			    sc->sc_wdcdev.sc_dev.dv_xname, pa->pa_function);
3294 			return;
3295 		}
3296 		sc->sc_wdcdev.nchannels = 1;
3297 	} else {
3298 		sc->sc_wdcdev.nchannels = 2;
3299 		sc->sc_wdcdev.UDMA_cap = 5;
3300 	}
3301 	for (i = 0; i < sc->sc_wdcdev.nchannels; i++) {
3302 		cp = &sc->pciide_channels[i];
3303 		if (sc->sc_wdcdev.nchannels > 1) {
3304 			compatchan = i;
3305 			if ((pciide_pci_read(sc->sc_pc, sc->sc_tag,
3306 			    HPT370_CTRL1(i)) & HPT370_CTRL1_EN) == 0) {
3307 				printf("%s: %s ignored (disabled)\n",
3308 				    sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
3309 				continue;
3310 			}
3311 		}
3312 		if (pciide_chansetup(sc, i, interface) == 0)
3313 			continue;
3314 		if (interface & PCIIDE_INTERFACE_PCI(i)) {
3315 			cp->hw_ok = pciide_mapregs_native(pa, cp, &cmdsize,
3316 			    &ctlsize, hpt_pci_intr);
3317 		} else {
3318 			cp->hw_ok = pciide_mapregs_compat(pa, cp, compatchan,
3319 			    &cmdsize, &ctlsize);
3320 		}
3321 		if (cp->hw_ok == 0)
3322 			return;
3323 		cp->wdc_channel.data32iot = cp->wdc_channel.cmd_iot;
3324 		cp->wdc_channel.data32ioh = cp->wdc_channel.cmd_ioh;
3325 		wdcattach(&cp->wdc_channel);
3326 		hpt_setup_channel(&cp->wdc_channel);
3327 	}
3328 	if (revision == HPT370_REV || revision == HPT370A_REV) {
3329 		/*
3330 		 * The HPT370/370A has a bit to disable interrupts; make sure
3331 		 * to clear it.
3332 		 */
3333 		pciide_pci_write(sc->sc_pc, sc->sc_tag, HPT_CSEL,
3334 		    pciide_pci_read(sc->sc_pc, sc->sc_tag, HPT_CSEL) &
3335 		    ~HPT_CSEL_IRQDIS);
3336 	}
3337 	return;
3338 }
3339 
3340 void
3341 hpt_setup_channel(chp)
3342 	struct channel_softc *chp;
3343 {
3344 	struct ata_drive_datas *drvp;
3345 	int drive;
3346 	int cable;
3347 	u_int32_t before, after;
3348 	u_int32_t idedma_ctl;
3349 	struct pciide_channel *cp = (struct pciide_channel*)chp;
3350 	struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
3351 
3352 	cable = pciide_pci_read(sc->sc_pc, sc->sc_tag, HPT_CSEL);
3353 
3354 	/* setup DMA if needed */
3355 	pciide_channel_dma_setup(cp);
3356 
3357 	idedma_ctl = 0;
3358 
3359 	/* Per drive settings */
3360 	for (drive = 0; drive < 2; drive++) {
3361 		drvp = &chp->ch_drive[drive];
3362 		/* If no drive, skip */
3363 		if ((drvp->drive_flags & DRIVE) == 0)
3364 			continue;
3365 		before = pci_conf_read(sc->sc_pc, sc->sc_tag,
3366 					HPT_IDETIM(chp->channel, drive));
3367 
3368 		/* add timing values, setup DMA if needed */
3369 		if (drvp->drive_flags & DRIVE_UDMA) {
3370 			/* use Ultra/DMA */
3371 			drvp->drive_flags &= ~DRIVE_DMA;
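			/*
			 * UDMA modes above 2 need an 80-conductor cable;
			 * the HPT_CSEL cable-detect bit tested below
			 * presumably flags a 40-wire cable, so clamp to
			 * mode 2 when it is set.
			 */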
3372 			if ((cable & HPT_CSEL_CBLID(chp->channel)) != 0 &&
3373 			    drvp->UDMA_mode > 2)
3374 				drvp->UDMA_mode = 2;
3375 			after = (sc->sc_wdcdev.nchannels == 2) ?
3376 			    hpt370_udma[drvp->UDMA_mode] :
3377 			    hpt366_udma[drvp->UDMA_mode];
3378 			idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
3379 		} else if (drvp->drive_flags & DRIVE_DMA) {
3380 			/*
3381 			 * use Multiword DMA.
3382 			 * Timings will be used for both PIO and DMA, so adjust
3383 			 * DMA mode if needed
3384 			 */
3385 			if (drvp->PIO_mode >= 3 &&
3386 			    (drvp->DMA_mode + 2) > drvp->PIO_mode) {
3387 				drvp->DMA_mode = drvp->PIO_mode - 2;
3388 			}
3389 			after = (sc->sc_wdcdev.nchannels == 2) ?
3390 			    hpt370_dma[drvp->DMA_mode] :
3391 			    hpt366_dma[drvp->DMA_mode];
3392 			idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
3393 		} else {
3394 			/* PIO only */
3395 			after = (sc->sc_wdcdev.nchannels == 2) ?
3396 			    hpt370_pio[drvp->PIO_mode] :
3397 			    hpt366_pio[drvp->PIO_mode];
3398 		}
3399 		pci_conf_write(sc->sc_pc, sc->sc_tag,
3400 		    HPT_IDETIM(chp->channel, drive), after);
3401 		WDCDEBUG_PRINT(("%s: bus speed register set to 0x%08x "
3402 		    "(BIOS 0x%08x)\n", sc->sc_wdcdev.sc_dev.dv_xname,
3403 		    after, before), DEBUG_PROBE);
3404 	}
3405 	if (idedma_ctl != 0) {
3406 		/* Add software bits in status register */
3407 		bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
3408 		    IDEDMA_CTL, idedma_ctl);
3409 	}
3410 	pciide_print_modes(cp);
3411 }
3412 
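/*
 * hpt_pci_intr: both channels share one interrupt line.  Poll each
 * channel's IDEDMA_CTL status; only call wdcintr() when the interrupt
 * bit is set, and write the status back to ack an interrupt that
 * wdcintr() did not claim.
 */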
3413 int
3414 hpt_pci_intr(arg)
3415 	void *arg;
3416 {
3417 	struct pciide_softc *sc = arg;
3418 	struct pciide_channel *cp;
3419 	struct channel_softc *wdc_cp;
3420 	int rv = 0;
3421 	int dmastat, i, crv;
3422 
3423 	for (i = 0; i < sc->sc_wdcdev.nchannels; i++) {
3424 		dmastat = bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh,
3425 		    IDEDMA_CTL + IDEDMA_SCH_OFFSET * i);
3426 		if ((dmastat & IDEDMA_CTL_INTR) == 0)
3427 			continue;
3428 		cp = &sc->pciide_channels[i];
3429 		wdc_cp = &cp->wdc_channel;
3430 		crv = wdcintr(wdc_cp);
3431 		if (crv == 0) {
3432 			printf("%s:%d: bogus intr\n",
3433 			    sc->sc_wdcdev.sc_dev.dv_xname, i);
3434 			bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
3435 			    IDEDMA_CTL + IDEDMA_SCH_OFFSET * i, dmastat);
3436 		} else
3437 			rv = 1;
3438 	}
3439 	return rv;
3440 }
3441 
3442 
3443 /* Macros to test product */
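/*
 * PDC_IS_262 matches the PDC20262 and newer Promise parts; PDC_IS_265
 * matches the PDC20265/20267 (the Ultra/100-class chips, per the comment
 * in pdc20265_pci_intr).  This is what selects UDMA cap 4 vs 5 below.
 */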
3444 #define PDC_IS_262(sc)							\
3445 	((sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_PDC20262 ||	\
3446 	(sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_PDC20265  ||	\
3447 	(sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_PDC20267)
3448 #define PDC_IS_265(sc)							\
3449 	((sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_PDC20265 ||	\
3450 	(sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_PDC20267)
3451 
3452 
3453 
3454 void
3455 pdc202xx_chip_map(sc, pa)
3456 	struct pciide_softc *sc;
3457 	struct pci_attach_args *pa;
3458 {
3459 	struct pciide_channel *cp;
3460 	int channel;
3461 	pcireg_t interface, st, mode;
3462 	bus_size_t cmdsize, ctlsize;
3463 
3464 	st = pci_conf_read(sc->sc_pc, sc->sc_tag, PDC2xx_STATE);
3465 	WDCDEBUG_PRINT(("pdc202xx_setup_chip: controller state 0x%x\n", st),
3466 	    DEBUG_PROBE);
3467 	if (pciide_chipen(sc, pa) == 0)
3468 		return;
3469 
3470 	/* turn off RAID mode */
3471 	st &= ~PDC2xx_STATE_IDERAID;
3472 
3473 	/*
3474 	 * We can't rely on the PCI_CLASS_REG content if the chip was in
3475 	 * RAID mode; we have to fake the interface.
3476 	 */
3477 	interface = PCIIDE_INTERFACE_SETTABLE(0) | PCIIDE_INTERFACE_SETTABLE(1);
3478 	if (st & PDC2xx_STATE_NATIVE)
3479 		interface |= PCIIDE_INTERFACE_PCI(0) | PCIIDE_INTERFACE_PCI(1);
3480 
3481 	printf(": DMA");
3482 	pciide_mapreg_dma(sc, pa);
3483 
3484 	sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
3485 	    WDC_CAPABILITY_MODE | WDC_CAPABILITY_NO_ATAPI_DMA;
3486 	if (sc->sc_dma_ok) {
3487 		sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_UDMA;
3488 		sc->sc_wdcdev.cap |= WDC_CAPABILITY_IRQACK;
3489 		sc->sc_wdcdev.irqack = pciide_irqack;
3490 	}
3491 	sc->sc_wdcdev.PIO_cap = 4;
3492 	sc->sc_wdcdev.DMA_cap = 2;
3493 	if (PDC_IS_265(sc))
3494 		sc->sc_wdcdev.UDMA_cap = 5;
3495 	else if (PDC_IS_262(sc))
3496 		sc->sc_wdcdev.UDMA_cap = 4;
3497 	else
3498 		sc->sc_wdcdev.UDMA_cap = 2;
3499 	sc->sc_wdcdev.set_modes = pdc202xx_setup_channel;
3500 	sc->sc_wdcdev.channels = sc->wdc_chanarray;
3501 	sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
3502 
3503 	pciide_print_channels(sc->sc_wdcdev.nchannels, interface);
3504 
3505 	/* setup failsafe defaults */
3506 	mode = 0;
3507 	mode = PDC2xx_TIM_SET_PA(mode, pdc2xx_pa[0]);
3508 	mode = PDC2xx_TIM_SET_PB(mode, pdc2xx_pb[0]);
3509 	mode = PDC2xx_TIM_SET_MB(mode, pdc2xx_dma_mb[0]);
3510 	mode = PDC2xx_TIM_SET_MC(mode, pdc2xx_dma_mc[0]);
3511 
3512 	for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
3513 		WDCDEBUG_PRINT(("pdc202xx_setup_chip: channel %d drive 0 "
3514 		    "initial timings  0x%x, now 0x%x\n", channel,
3515 		    pci_conf_read(sc->sc_pc, sc->sc_tag,
3516 		    PDC2xx_TIM(channel, 0)), mode | PDC2xx_TIM_IORDYp),
3517 		    DEBUG_PROBE);
3518 		pci_conf_write(sc->sc_pc, sc->sc_tag, PDC2xx_TIM(channel, 0),
3519 		    mode | PDC2xx_TIM_IORDYp);
3520 		WDCDEBUG_PRINT(("pdc202xx_setup_chip: channel %d drive 1 "
3521 		    "initial timings  0x%x, now 0x%x\n", channel,
3522 		    pci_conf_read(sc->sc_pc, sc->sc_tag,
3523 		    PDC2xx_TIM(channel, 1)), mode), DEBUG_PROBE);
3524 		pci_conf_write(sc->sc_pc, sc->sc_tag, PDC2xx_TIM(channel, 1),
3525 		    mode);
3526 	}
3527 
3528 	mode = PDC2xx_SCR_DMA;
3529 	if (PDC_IS_262(sc)) {
3530 		mode = PDC2xx_SCR_SET_GEN(mode, PDC262_SCR_GEN_LAT);
3531 	} else {
3532 		/* the BIOS set it up this way */
3533 		mode = PDC2xx_SCR_SET_GEN(mode, 0x1);
3534 	}
3535 	mode = PDC2xx_SCR_SET_I2C(mode, 0x3); /* ditto */
3536 	mode = PDC2xx_SCR_SET_POLL(mode, 0x1); /* ditto */
3537 	WDCDEBUG_PRINT(("pdc202xx_setup_chip: initial SCR  0x%x, now 0x%x\n",
3538 	    bus_space_read_4(sc->sc_dma_iot, sc->sc_dma_ioh, PDC2xx_SCR), mode),
3539 	    DEBUG_PROBE);
3540 	bus_space_write_4(sc->sc_dma_iot, sc->sc_dma_ioh, PDC2xx_SCR, mode);
3541 
3542 	/* controller initial state register is OK even without BIOS */
3543 	/* Set DMA mode to IDE DMA compatibility */
3544 	mode = bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh, PDC2xx_PM);
3545 	WDCDEBUG_PRINT(("pdc202xx_setup_chip: primary mode 0x%x", mode),
3546 	    DEBUG_PROBE);
3547 	bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh, PDC2xx_PM,
3548 	    mode | 0x1);
3549 	mode = bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh, PDC2xx_SM);
3550 	WDCDEBUG_PRINT((", secondary mode 0x%x\n", mode), DEBUG_PROBE);
3551 	bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh, PDC2xx_SM,
3552 	    mode | 0x1);
3553 
3554 	for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
3555 		cp = &sc->pciide_channels[channel];
3556 		if (pciide_chansetup(sc, channel, interface) == 0)
3557 			continue;
3558 		if ((st & (PDC_IS_262(sc) ?
3559 		    PDC262_STATE_EN(channel):PDC246_STATE_EN(channel))) == 0) {
3560 			printf("%s: %s ignored (disabled)\n",
3561 			    sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
3562 			continue;
3563 		}
3564 		pciide_map_compat_intr(pa, cp, channel, interface);
3565 		if (cp->hw_ok == 0)
3566 			continue;
3567 		if (PDC_IS_265(sc))
3568 			pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize,
3569 			    pdc20265_pci_intr);
3570 		else
3571 			pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize,
3572 			    pdc202xx_pci_intr);
3573 		if (cp->hw_ok == 0) {
3574 			pciide_unmap_compat_intr(pa, cp, channel, interface);
3575 			continue;
3576 		}
3577 		if (pciide_chan_candisable(cp)) {
3578 			st &= ~(PDC_IS_262(sc) ?
3579 			    PDC262_STATE_EN(channel):PDC246_STATE_EN(channel));
3580 			pciide_unmap_compat_intr(pa, cp, channel, interface);
3581 		}
3582 		pdc202xx_setup_channel(&cp->wdc_channel);
3583 	}
3584 	WDCDEBUG_PRINT(("pdc202xx_setup_chip: new controller state 0x%x\n", st),
3585 	    DEBUG_PROBE);
3586 	pci_conf_write(sc->sc_pc, sc->sc_tag, PDC2xx_STATE, st);
3587 	return;
3588 }
3589 
3590 void
3591 pdc202xx_setup_channel(chp)
3592 	struct channel_softc *chp;
3593 {
3594 	struct ata_drive_datas *drvp;
3595 	int drive;
3596 	pcireg_t mode, st;
3597 	u_int32_t idedma_ctl, scr, atapi;
3598 	struct pciide_channel *cp = (struct pciide_channel*)chp;
3599 	struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
3600 	int channel = chp->channel;
3601 
3602 	/* setup DMA if needed */
3603 	pciide_channel_dma_setup(cp);
3604 
3605 	idedma_ctl = 0;
3606 	WDCDEBUG_PRINT(("pdc202xx_setup_channel %s: scr 0x%x\n",
3607 	    sc->sc_wdcdev.sc_dev.dv_xname,
3608 	    bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh, PDC262_U66)),
3609 	    DEBUG_PROBE);
3610 
3611 	/* Per channel settings */
3612 	if (PDC_IS_262(sc)) {
3613 		scr = bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh,
3614 		    PDC262_U66);
3615 		st = pci_conf_read(sc->sc_pc, sc->sc_tag, PDC2xx_STATE);
3616 		/* Trim UDMA mode */
3617 		if ((st & PDC262_STATE_80P(channel)) != 0 ||
3618 		    (chp->ch_drive[0].drive_flags & DRIVE_UDMA &&
3619 		    chp->ch_drive[0].UDMA_mode <= 2) ||
3620 		    (chp->ch_drive[1].drive_flags & DRIVE_UDMA &&
3621 		    chp->ch_drive[1].UDMA_mode <= 2)) {
3622 			if (chp->ch_drive[0].UDMA_mode > 2)
3623 				chp->ch_drive[0].UDMA_mode = 2;
3624 			if (chp->ch_drive[1].UDMA_mode > 2)
3625 				chp->ch_drive[1].UDMA_mode = 2;
3626 		}
3627 		/* Set U66 if needed */
3628 		if ((chp->ch_drive[0].drive_flags & DRIVE_UDMA &&
3629 		    chp->ch_drive[0].UDMA_mode > 2) ||
3630 		    (chp->ch_drive[1].drive_flags & DRIVE_UDMA &&
3631 		    chp->ch_drive[1].UDMA_mode > 2))
3632 			scr |= PDC262_U66_EN(channel);
3633 		else
3634 			scr &= ~PDC262_U66_EN(channel);
3635 		bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
3636 		    PDC262_U66, scr);
3637 		WDCDEBUG_PRINT(("pdc202xx_setup_channel %s:%d: ATAPI 0x%x\n",
3638 		    sc->sc_wdcdev.sc_dev.dv_xname, channel,
3639 		    bus_space_read_4(sc->sc_dma_iot, sc->sc_dma_ioh,
3640 		    PDC262_ATAPI(channel))), DEBUG_PROBE);
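		/*
		 * If the channel has an ATAPI device: when one drive uses
		 * UDMA and the other uses plain multiword DMA, clear the
		 * per-channel PDC262_ATAPI_UDMA bit; otherwise set it.
		 */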
3641 		if (chp->ch_drive[0].drive_flags & DRIVE_ATAPI ||
3642 		    chp->ch_drive[1].drive_flags & DRIVE_ATAPI) {
3643 			if (((chp->ch_drive[0].drive_flags & DRIVE_UDMA) &&
3644 			    !(chp->ch_drive[1].drive_flags & DRIVE_UDMA) &&
3645 			    (chp->ch_drive[1].drive_flags & DRIVE_DMA)) ||
3646 			    ((chp->ch_drive[1].drive_flags & DRIVE_UDMA) &&
3647 			    !(chp->ch_drive[0].drive_flags & DRIVE_UDMA) &&
3648 			    (chp->ch_drive[0].drive_flags & DRIVE_DMA)))
3649 				atapi = 0;
3650 			else
3651 				atapi = PDC262_ATAPI_UDMA;
3652 			bus_space_write_4(sc->sc_dma_iot, sc->sc_dma_ioh,
3653 			    PDC262_ATAPI(channel), atapi);
3654 		}
3655 	}
3656 	for (drive = 0; drive < 2; drive++) {
3657 		drvp = &chp->ch_drive[drive];
3658 		/* If no drive, skip */
3659 		if ((drvp->drive_flags & DRIVE) == 0)
3660 			continue;
3661 		mode = 0;
3662 		if (drvp->drive_flags & DRIVE_UDMA) {
3663 			/* use Ultra/DMA */
3664 			drvp->drive_flags &= ~DRIVE_DMA;
3665 			mode = PDC2xx_TIM_SET_MB(mode,
3666 			   pdc2xx_udma_mb[drvp->UDMA_mode]);
3667 			mode = PDC2xx_TIM_SET_MC(mode,
3668 			   pdc2xx_udma_mc[drvp->UDMA_mode]);
3670 			idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
3671 		} else if (drvp->drive_flags & DRIVE_DMA) {
3672 			mode = PDC2xx_TIM_SET_MB(mode,
3673 			    pdc2xx_dma_mb[drvp->DMA_mode]);
3674 			mode = PDC2xx_TIM_SET_MC(mode,
3675 			   pdc2xx_dma_mc[drvp->DMA_mode]);
3676 			idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
3677 		} else {
3678 			mode = PDC2xx_TIM_SET_MB(mode,
3679 			    pdc2xx_dma_mb[0]);
3680 			mode = PDC2xx_TIM_SET_MC(mode,
3681 			    pdc2xx_dma_mc[0]);
3682 		}
3683 		mode = PDC2xx_TIM_SET_PA(mode, pdc2xx_pa[drvp->PIO_mode]);
3684 		mode = PDC2xx_TIM_SET_PB(mode, pdc2xx_pb[drvp->PIO_mode]);
3685 		if (drvp->drive_flags & DRIVE_ATA)
3686 			mode |= PDC2xx_TIM_PRE;
3687 		mode |= PDC2xx_TIM_SYNC | PDC2xx_TIM_ERRDY;
3688 		if (drvp->PIO_mode >= 3) {
3689 			mode |= PDC2xx_TIM_IORDY;
3690 			if (drive == 0)
3691 				mode |= PDC2xx_TIM_IORDYp;
3692 		}
3693 		WDCDEBUG_PRINT(("pdc202xx_setup_channel: %s:%d:%d "
3694 		    "timings 0x%x\n",
3695 		    sc->sc_wdcdev.sc_dev.dv_xname,
3696 		    chp->channel, drive, mode), DEBUG_PROBE);
3697 		pci_conf_write(sc->sc_pc, sc->sc_tag,
3698 		    PDC2xx_TIM(chp->channel, drive), mode);
3699 	}
3700 	if (idedma_ctl != 0) {
3701 		/* Add software bits in status register */
3702 		bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
3703 		    IDEDMA_CTL, idedma_ctl);
3704 	}
3705 	pciide_print_modes(cp);
3706 }
3707 
3708 int
3709 pdc202xx_pci_intr(arg)
3710 	void *arg;
3711 {
3712 	struct pciide_softc *sc = arg;
3713 	struct pciide_channel *cp;
3714 	struct channel_softc *wdc_cp;
3715 	int i, rv, crv;
3716 	u_int32_t scr;
3717 
3718 	rv = 0;
3719 	scr = bus_space_read_4(sc->sc_dma_iot, sc->sc_dma_ioh, PDC2xx_SCR);
3720 	for (i = 0; i < sc->sc_wdcdev.nchannels; i++) {
3721 		cp = &sc->pciide_channels[i];
3722 		wdc_cp = &cp->wdc_channel;
3723 		/* If a compat channel, skip. */
3724 		if (cp->compat)
3725 			continue;
3726 		if (scr & PDC2xx_SCR_INT(i)) {
3727 			crv = wdcintr(wdc_cp);
3728 			if (crv == 0)
3729 				printf("%s:%d: bogus intr (reg 0x%x)\n",
3730 				    sc->sc_wdcdev.sc_dev.dv_xname, i, scr);
3731 			else
3732 				rv = 1;
3733 		}
3734 	}
3735 	return rv;
3736 }
3737 
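/*
 * pdc202xx_pci_intr: the per-channel interrupt bits live in the SCR
 * register, so read it once and dispatch wdcintr() only for the channels
 * that flagged an interrupt.
 */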
3738 int
3739 pdc20265_pci_intr(arg)
3740 	void *arg;
3741 {
3742 	struct pciide_softc *sc = arg;
3743 	struct pciide_channel *cp;
3744 	struct channel_softc *wdc_cp;
3745 	int i, rv, crv;
3746 	u_int32_t dmastat;
3747 
3748 	rv = 0;
3749 	for (i = 0; i < sc->sc_wdcdev.nchannels; i++) {
3750 		cp = &sc->pciide_channels[i];
3751 		wdc_cp = &cp->wdc_channel;
3752 		/* If a compat channel, skip. */
3753 		if (cp->compat)
3754 			continue;
3755 		/*
3756 		 * The Ultra/100 seems to assert PDC2xx_SCR_INT spuriously;
3757 		 * however, it asserts INT in IDEDMA_CTL even for non-DMA ops,
3758 		 * so use that instead (requires 2 register reads instead of 1,
3759 		 * but we can't do it another way).
3760 		 */
3761 		dmastat = bus_space_read_1(sc->sc_dma_iot,
3762 		    sc->sc_dma_ioh, IDEDMA_CTL + IDEDMA_SCH_OFFSET * i);
3763 		if ((dmastat & IDEDMA_CTL_INTR) == 0)
3764 			continue;
3765 
3766 		crv = wdcintr(wdc_cp);
3767 		if (crv == 0)
3768 			printf("%s:%d: bogus intr\n",
3769 			    sc->sc_wdcdev.sc_dev.dv_xname, i);
3770 		else
3771 			rv = 1;
3772 	}
3773 	return rv;
3774 }
3775 
3776 /*
3777  * Inline functions for accessing the timing registers of the
3778  * OPTi controller.
3779  *
3780  * These *MUST* disable interrupts as they need atomic access to
3781  * certain magic registers. Failure to adhere to this *will*
3782  * break things in subtle ways if the wdc registers are accessed
3783  * by an interrupt routine while this magic sequence is executing.
3784  */
3785 static __inline__ u_int8_t
3786 opti_read_config(struct channel_softc *chp, int reg)
3787 {
3788 	u_int8_t rv;
3789 	int s = splhigh();
3790 
3791 	/* Two consecutive 16-bit reads from register #1 (0x1f1/0x171) */
3792 	(void) bus_space_read_2(chp->cmd_iot, chp->cmd_ioh, wdr_features);
3793 	(void) bus_space_read_2(chp->cmd_iot, chp->cmd_ioh, wdr_features);
3794 
3795 	/* Followed by an 8-bit write of 0x3 to register #2 */
3796 	bus_space_write_1(chp->cmd_iot, chp->cmd_ioh, wdr_seccnt, 0x03u);
3797 
3798 	/* Now we can read the required register */
3799 	rv = bus_space_read_1(chp->cmd_iot, chp->cmd_ioh, reg);
3800 
3801 	/* Restore the real registers */
3802 	bus_space_write_1(chp->cmd_iot, chp->cmd_ioh, wdr_seccnt, 0x83u);
3803 
3804 	splx(s);
3805 
3806 	return rv;
3807 }
3808 
3809 static __inline__ void
3810 opti_write_config(struct channel_softc *chp, int reg, u_int8_t val)
3811 {
3812 	int s = splhigh();
3813 
3814 	/* Two consecutive 16-bit reads from register #1 (0x1f1/0x171) */
3815 	(void) bus_space_read_2(chp->cmd_iot, chp->cmd_ioh, wdr_features);
3816 	(void) bus_space_read_2(chp->cmd_iot, chp->cmd_ioh, wdr_features);
3817 
3818 	/* Followed by an 8-bit write of 0x3 to register #2 */
3819 	bus_space_write_1(chp->cmd_iot, chp->cmd_ioh, wdr_seccnt, 0x03u);
3820 
3821 	/* Now we can write the required register */
3822 	bus_space_write_1(chp->cmd_iot, chp->cmd_ioh, reg, val);
3823 
3824 	/* Restore the real registers */
3825 	bus_space_write_1(chp->cmd_iot, chp->cmd_ioh, wdr_seccnt, 0x83u);
3826 
3827 	splx(s);
3828 }
3829 
3830 void
3831 opti_chip_map(sc, pa)
3832 	struct pciide_softc *sc;
3833 	struct pci_attach_args *pa;
3834 {
3835 	struct pciide_channel *cp;
3836 	bus_size_t cmdsize, ctlsize;
3837 	pcireg_t interface;
3838 	u_int8_t init_ctrl;
3839 	int channel;
3840 
3841 	if (pciide_chipen(sc, pa) == 0)
3842 		return;
3843 	printf(": DMA");
3844 	/*
3845 	 * XXXSCW:
3846 	 * There seem to be a couple of buggy revisions/implementations
3847 	 * of the OPTi pciide chipset. This kludge seems to fix one of
3848 	 * the reported problems (NetBSD PR/11644) but still fails for the
3849 	 * other (NetBSD PR/13151), although the latter may be due to other
3850 	 * issues too...
3851 	 */
3852 	if (PCI_REVISION(pa->pa_class) <= 0x12) {
3853 		printf(" (disabled)");
3854 		sc->sc_dma_ok = 0;
3855 		sc->sc_wdcdev.cap = 0;
3856 	} else {
3857 		sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA32;
3858 		pciide_mapreg_dma(sc, pa);
3859 	}
3860 
3861 	sc->sc_wdcdev.cap |= WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_MODE;
3862 	sc->sc_wdcdev.PIO_cap = 4;
3863 	if (sc->sc_dma_ok) {
3864 		sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_IRQACK;
3865 		sc->sc_wdcdev.irqack = pciide_irqack;
3866 		sc->sc_wdcdev.DMA_cap = 2;
3867 	}
3868 	sc->sc_wdcdev.set_modes = opti_setup_channel;
3869 
3870 	sc->sc_wdcdev.channels = sc->wdc_chanarray;
3871 	sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
3872 
3873 	init_ctrl = pciide_pci_read(sc->sc_pc, sc->sc_tag,
3874 	    OPTI_REG_INIT_CONTROL);
3875 
3876 	interface = PCI_INTERFACE(pa->pa_class);
3877 
3878 	pciide_print_channels(sc->sc_wdcdev.nchannels, interface);
3879 
3880 	for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
3881 		cp = &sc->pciide_channels[channel];
3882 		if (pciide_chansetup(sc, channel, interface) == 0)
3883 			continue;
3884 		if (channel == 1 &&
3885 		    (init_ctrl & OPTI_INIT_CONTROL_CH2_DISABLE) != 0) {
3886 			printf("%s: %s channel ignored (disabled)\n",
3887 			    sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
3888 			continue;
3889 		}
3890 		pciide_map_compat_intr(pa, cp, channel, interface);
3891 		if (cp->hw_ok == 0)
3892 			continue;
3893 		pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize,
3894 		    pciide_pci_intr);
3895 		if (cp->hw_ok == 0) {
3896 			pciide_unmap_compat_intr(pa, cp, channel, interface);
3897 			continue;
3898 		}
3899 		opti_setup_channel(&cp->wdc_channel);
3900 	}
3901 }
3902 
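/*
 * opti_setup_channel: disable the timings via the control register,
 * compute a per-drive index into the opti_tim_* tables (PIO modes index
 * directly, multiword DMA modes are offset by 5), then program the
 * address setup, read/write cycle and enhanced-mode values before
 * re-enabling the timings.
 */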
3903 void
3904 opti_setup_channel(chp)
3905 	struct channel_softc *chp;
3906 {
3907 	struct ata_drive_datas *drvp;
3908 	struct pciide_channel *cp = (struct pciide_channel*)chp;
3909 	struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
3910 	int drive, spd;
3911 	int mode[2];
3912 	u_int8_t rv, mr;
3913 
3914 	/*
3915 	 * Clear the `Delay', `Address Setup Time' and `Index' fields of the
3916 	 * Miscellaneous Register before programming the per-drive timings.
3917 	 */
3918 	mr = opti_read_config(chp, OPTI_REG_MISC) & ~OPTI_MISC_INDEX_MASK;
3919 	mr &= ~(OPTI_MISC_DELAY_MASK |
3920 		OPTI_MISC_ADDR_SETUP_MASK |
3921 		OPTI_MISC_INDEX_MASK);
3922 
3923 	/* Prime the control register before setting timing values */
3924 	opti_write_config(chp, OPTI_REG_CONTROL, OPTI_CONTROL_DISABLE);
3925 
3926 	/* Determine the clock rate of the PCI bus the chip is attached to */
3927 	spd = (int) opti_read_config(chp, OPTI_REG_STRAP);
3928 	spd &= OPTI_STRAP_PCI_SPEED_MASK;
3929 
3930 	/* setup DMA if needed */
3931 	pciide_channel_dma_setup(cp);
3932 
3933 	for (drive = 0; drive < 2; drive++) {
3934 		drvp = &chp->ch_drive[drive];
3935 		/* If no drive, skip */
3936 		if ((drvp->drive_flags & DRIVE) == 0) {
3937 			mode[drive] = -1;
3938 			continue;
3939 		}
3940 
3941 		if ((drvp->drive_flags & DRIVE_DMA)) {
3942 			/*
3943 			 * Timings will be used for both PIO and DMA,
3944 			 * so adjust DMA mode if needed
3945 			 */
3946 			if (drvp->PIO_mode > (drvp->DMA_mode + 2))
3947 				drvp->PIO_mode = drvp->DMA_mode + 2;
3948 			if (drvp->DMA_mode + 2 > (drvp->PIO_mode))
3949 				drvp->DMA_mode = (drvp->PIO_mode > 2) ?
3950 				    drvp->PIO_mode - 2 : 0;
3951 			if (drvp->DMA_mode == 0)
3952 				drvp->PIO_mode = 0;
3953 
3954 			mode[drive] = drvp->DMA_mode + 5;
3955 		} else
3956 			mode[drive] = drvp->PIO_mode;
3957 
3958 		if (drive && mode[0] >= 0 &&
3959 		    (opti_tim_as[spd][mode[0]] != opti_tim_as[spd][mode[1]])) {
3960 			/*
3961 			 * Can't have two drives using different values
3962 			 * for `Address Setup Time'.
3963 			 * Slow down the faster drive to compensate.
3964 			 */
3965 			int d = (opti_tim_as[spd][mode[0]] >
3966 				 opti_tim_as[spd][mode[1]]) ?  0 : 1;
3967 
3968 			mode[d] = mode[1-d];
3969 			chp->ch_drive[d].PIO_mode = chp->ch_drive[1-d].PIO_mode;
3970 			chp->ch_drive[d].DMA_mode = 0;
3971 			chp->ch_drive[d].drive_flags &= ~DRIVE_DMA;
3972 		}
3973 	}
3974 
3975 	for (drive = 0; drive < 2; drive++) {
3976 		int m;
3977 		if ((m = mode[drive]) < 0)
3978 			continue;
3979 
3980 		/* Set the Address Setup Time and select appropriate index */
3981 		rv = opti_tim_as[spd][m] << OPTI_MISC_ADDR_SETUP_SHIFT;
3982 		rv |= OPTI_MISC_INDEX(drive);
3983 		opti_write_config(chp, OPTI_REG_MISC, mr | rv);
3984 
3985 		/* Set the pulse width and recovery timing parameters */
3986 		rv  = opti_tim_cp[spd][m] << OPTI_PULSE_WIDTH_SHIFT;
3987 		rv |= opti_tim_rt[spd][m] << OPTI_RECOVERY_TIME_SHIFT;
3988 		opti_write_config(chp, OPTI_REG_READ_CYCLE_TIMING, rv);
3989 		opti_write_config(chp, OPTI_REG_WRITE_CYCLE_TIMING, rv);
3990 
3991 		/* Set the Enhanced Mode register appropriately */
3992 		rv = pciide_pci_read(sc->sc_pc, sc->sc_tag, OPTI_REG_ENH_MODE);
3993 		rv &= ~OPTI_ENH_MODE_MASK(chp->channel, drive);
3994 		rv |= OPTI_ENH_MODE(chp->channel, drive, opti_tim_em[m]);
3995 		pciide_pci_write(sc->sc_pc, sc->sc_tag, OPTI_REG_ENH_MODE, rv);
3996 	}
3997 
3998 	/* Finally, enable the timings */
3999 	opti_write_config(chp, OPTI_REG_CONTROL, OPTI_CONTROL_ENABLE);
4000 
4001 	pciide_print_modes(cp);
4002 }
4003 
4004 
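/*
 * The ATP850U is the older ACARD part; it gets UDMA cap 2 below and uses
 * a different UDMA/timing register layout than the ATP860-class chips
 * handled by the else branches.
 */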
4005 #define	ACARD_IS_850(sc)							\
4006 	((sc)->sc_pp->ide_product == PCI_PRODUCT_ACARD_ATP850U)
4007 
4008 void
4009 acard_chip_map(sc, pa)
4010 	struct pciide_softc *sc;
4011 	struct pci_attach_args *pa;
4012 {
4013 	struct pciide_channel *cp;
4014 	int i;
4015 	pcireg_t interface;
4016 	bus_size_t cmdsize, ctlsize;
4017 
4018 	if (pciide_chipen(sc, pa) == 0)
4019 		return;
4020 
4021 	/*
4022 	 * When the chip is in native mode it identifies itself as
4023 	 * 'misc mass storage', so fake the interface value in this case.
4024 	 */
4025 	if (PCI_SUBCLASS(pa->pa_class) == PCI_SUBCLASS_MASS_STORAGE_IDE) {
4026 		interface = PCI_INTERFACE(pa->pa_class);
4027 	} else {
4028 		interface = PCIIDE_INTERFACE_BUS_MASTER_DMA |
4029 		    PCIIDE_INTERFACE_PCI(0) | PCIIDE_INTERFACE_PCI(1);
4030 	}
4031 
4032 	printf(": DMA");
4033 	pciide_mapreg_dma(sc, pa);
4034 	printf("\n");
4035 	sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
4036 	    WDC_CAPABILITY_MODE;
4037 
4038 	if (sc->sc_dma_ok) {
4039 		sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_UDMA;
4040 		sc->sc_wdcdev.cap |= WDC_CAPABILITY_IRQACK;
4041 		sc->sc_wdcdev.irqack = pciide_irqack;
4042 	}
4043 	sc->sc_wdcdev.PIO_cap = 4;
4044 	sc->sc_wdcdev.DMA_cap = 2;
4045 	sc->sc_wdcdev.UDMA_cap = ACARD_IS_850(sc) ? 2 : 4;
4046 
4047 	sc->sc_wdcdev.set_modes = acard_setup_channel;
4048 	sc->sc_wdcdev.channels = sc->wdc_chanarray;
4049 	sc->sc_wdcdev.nchannels = 2;
4050 
4051 	for (i = 0; i < sc->sc_wdcdev.nchannels; i++) {
4052 		cp = &sc->pciide_channels[i];
4053 		if (pciide_chansetup(sc, i, interface) == 0)
4054 			continue;
4055 		if (interface & PCIIDE_INTERFACE_PCI(i)) {
4056 			cp->hw_ok = pciide_mapregs_native(pa, cp, &cmdsize,
4057 			    &ctlsize, pciide_pci_intr);
4058 		} else {
4059 			cp->hw_ok = pciide_mapregs_compat(pa, cp, i,
4060 			    &cmdsize, &ctlsize);
4061 		}
4062 		if (cp->hw_ok == 0)
4063 			return;
4064 		cp->wdc_channel.data32iot = cp->wdc_channel.cmd_iot;
4065 		cp->wdc_channel.data32ioh = cp->wdc_channel.cmd_ioh;
4066 		wdcattach(&cp->wdc_channel);
4067 		acard_setup_channel(&cp->wdc_channel);
4068 	}
4069 	if (!ACARD_IS_850(sc)) {
4070 		u_int32_t reg;
4071 		reg = pci_conf_read(sc->sc_pc, sc->sc_tag, ATP8x0_CTRL);
4072 		reg &= ~ATP860_CTRL_INT;
4073 		pci_conf_write(sc->sc_pc, sc->sc_tag, ATP8x0_CTRL, reg);
4074 	}
4075 }
4076 
4077 void
4078 acard_setup_channel(chp)
4079 	struct channel_softc *chp;
4080 {
4081 	struct ata_drive_datas *drvp;
4082 	struct pciide_channel *cp = (struct pciide_channel*)chp;
4083 	struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
4084 	int channel = chp->channel;
4085 	int drive;
4086 	u_int32_t idetime, udma_mode;
4087 	u_int32_t idedma_ctl;
4088 
4089 	/* setup DMA if needed */
4090 	pciide_channel_dma_setup(cp);
4091 
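	/*
	 * The ATP850 keeps UDMA settings in ATP850_UDMA and per-channel
	 * timings in ATP850_IDETIME(channel); the ATP860-class parts use
	 * shared ATP860_IDETIME/ATP860_UDMA registers with per-channel
	 * fields.  This channel's fields are cleared here and the registers
	 * are written back at the end of the function.
	 */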
4092 	if (ACARD_IS_850(sc)) {
4093 		idetime = 0;
4094 		udma_mode = pci_conf_read(sc->sc_pc, sc->sc_tag, ATP850_UDMA);
4095 		udma_mode &= ~ATP850_UDMA_MASK(channel);
4096 	} else {
4097 		idetime = pci_conf_read(sc->sc_pc, sc->sc_tag, ATP860_IDETIME);
4098 		idetime &= ~ATP860_SETTIME_MASK(channel);
4099 		udma_mode = pci_conf_read(sc->sc_pc, sc->sc_tag, ATP860_UDMA);
4100 		udma_mode &= ~ATP860_UDMA_MASK(channel);
4101 	}
4102 
4103 	idedma_ctl = 0;
4104 
4105 	/* Per drive settings */
4106 	for (drive = 0; drive < 2; drive++) {
4107 		drvp = &chp->ch_drive[drive];
4108 		/* If no drive, skip */
4109 		if ((drvp->drive_flags & DRIVE) == 0)
4110 			continue;
4111 		/* add timing values, setup DMA if needed */
4112 		if ((chp->wdc->cap & WDC_CAPABILITY_UDMA) &&
4113 		    (drvp->drive_flags & DRIVE_UDMA)) {
4114 			/* use Ultra/DMA */
4115 			if (ACARD_IS_850(sc)) {
4116 				idetime |= ATP850_SETTIME(drive,
4117 				    acard_act_udma[drvp->UDMA_mode],
4118 				    acard_rec_udma[drvp->UDMA_mode]);
4119 				udma_mode |= ATP850_UDMA_MODE(channel, drive,
4120 				    acard_udma_conf[drvp->UDMA_mode]);
4121 			} else {
4122 				idetime |= ATP860_SETTIME(channel, drive,
4123 				    acard_act_udma[drvp->UDMA_mode],
4124 				    acard_rec_udma[drvp->UDMA_mode]);
4125 				udma_mode |= ATP860_UDMA_MODE(channel, drive,
4126 				    acard_udma_conf[drvp->UDMA_mode]);
4127 			}
4128 			idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
4129 		} else if ((chp->wdc->cap & WDC_CAPABILITY_DMA) &&
4130 		    (drvp->drive_flags & DRIVE_DMA)) {
4131 			/* use Multiword DMA */
4132 			drvp->drive_flags &= ~DRIVE_UDMA;
4133 			if (ACARD_IS_850(sc)) {
4134 				idetime |= ATP850_SETTIME(drive,
4135 				    acard_act_dma[drvp->DMA_mode],
4136 				    acard_rec_dma[drvp->DMA_mode]);
4137 			} else {
4138 				idetime |= ATP860_SETTIME(channel, drive,
4139 				    acard_act_dma[drvp->DMA_mode],
4140 				    acard_rec_dma[drvp->DMA_mode]);
4141 			}
4142 			idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
4143 		} else {
4144 			/* PIO only */
4145 			drvp->drive_flags &= ~(DRIVE_UDMA | DRIVE_DMA);
4146 			if (ACARD_IS_850(sc)) {
4147 				idetime |= ATP850_SETTIME(drive,
4148 				    acard_act_pio[drvp->PIO_mode],
4149 				    acard_rec_pio[drvp->PIO_mode]);
4150 			} else {
4151 				idetime |= ATP860_SETTIME(channel, drive,
4152 				    acard_act_pio[drvp->PIO_mode],
4153 				    acard_rec_pio[drvp->PIO_mode]);
4154 			}
4155 			pci_conf_write(sc->sc_pc, sc->sc_tag, ATP8x0_CTRL,
4156 			    pci_conf_read(sc->sc_pc, sc->sc_tag, ATP8x0_CTRL)
4157 			    | ATP8x0_CTRL_EN(channel));
4158 		}
4159 	}
4160 
4161 	if (idedma_ctl != 0) {
4162 		/* Add software bits in status register */
4163 		bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
4164 		    IDEDMA_CTL + IDEDMA_SCH_OFFSET * channel, idedma_ctl);
4165 	}
4166 	pciide_print_modes(cp);
4167 
4168 	if (ACARD_IS_850(sc)) {
4169 		pci_conf_write(sc->sc_pc, sc->sc_tag,
4170 		    ATP850_IDETIME(channel), idetime);
4171 		pci_conf_write(sc->sc_pc, sc->sc_tag, ATP850_UDMA, udma_mode);
4172 	} else {
4173 		pci_conf_write(sc->sc_pc, sc->sc_tag, ATP860_IDETIME, idetime);
4174 		pci_conf_write(sc->sc_pc, sc->sc_tag, ATP860_UDMA, udma_mode);
4175 	}
4176 }
4177 
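/*
 * acard_pci_intr: check each channel's IDEDMA_CTL status; if the channel
 * is not waiting for an interrupt, still call wdcintr() but ack the DMA
 * status itself and don't report it as a bogus interrupt.
 */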
4178 int
4179 acard_pci_intr(arg)
4180 	void *arg;
4181 {
4182 	struct pciide_softc *sc = arg;
4183 	struct pciide_channel *cp;
4184 	struct channel_softc *wdc_cp;
4185 	int rv = 0;
4186 	int dmastat, i, crv;
4187 
4188 	for (i = 0; i < sc->sc_wdcdev.nchannels; i++) {
4189 		dmastat = bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh,
4190 		    IDEDMA_CTL + IDEDMA_SCH_OFFSET * i);
4191 		if ((dmastat & IDEDMA_CTL_INTR) == 0)
4192 			continue;
4193 		cp = &sc->pciide_channels[i];
4194 		wdc_cp = &cp->wdc_channel;
4195 		if ((wdc_cp->ch_flags & WDCF_IRQ_WAIT) == 0) {
4196 			(void)wdcintr(wdc_cp);
4197 			bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
4198 			    IDEDMA_CTL + IDEDMA_SCH_OFFSET * i, dmastat);
4199 			continue;
4200 		}
4201 		crv = wdcintr(wdc_cp);
4202 		if (crv == 0)
4203 			printf("%s:%d: bogus intr\n",
4204 			    sc->sc_wdcdev.sc_dev.dv_xname, i);
4205 		else if (crv == 1)
4206 			rv = 1;
4207 		else if (rv == 0)
4208 			rv = crv;
4209 	}
4210 	return rv;
4211 }
4212