1 /*	$OpenBSD: pciide.c,v 1.76 2001/12/11 22:04:12 chris Exp $	*/
2 /*	$NetBSD: pciide.c,v 1.127 2001/08/03 01:31:08 tsutsui Exp $	*/
3 
4 /*
5  * Copyright (c) 1999, 2000, 2001 Manuel Bouyer.
6  *
7  * Redistribution and use in source and binary forms, with or without
8  * modification, are permitted provided that the following conditions
9  * are met:
10  * 1. Redistributions of source code must retain the above copyright
11  *    notice, this list of conditions and the following disclaimer.
12  * 2. Redistributions in binary form must reproduce the above copyright
13  *    notice, this list of conditions and the following disclaimer in the
14  *    documentation and/or other materials provided with the distribution.
15  * 3. All advertising materials mentioning features or use of this software
16  *    must display the following acknowledgement:
17  *	This product includes software developed by the University of
18  *	California, Berkeley and its contributors.
19  * 4. Neither the name of the University nor the names of its contributors
20  *    may be used to endorse or promote products derived from this software
21  *    without specific prior written permission.
22  *
23  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
24  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
25  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
26  * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
27  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
28  * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
29  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
30  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
31  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
32  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
33  *
34  */
35 
36 /*
37  * Copyright (c) 1996, 1998 Christopher G. Demetriou.  All rights reserved.
38  *
39  * Redistribution and use in source and binary forms, with or without
40  * modification, are permitted provided that the following conditions
41  * are met:
42  * 1. Redistributions of source code must retain the above copyright
43  *    notice, this list of conditions and the following disclaimer.
44  * 2. Redistributions in binary form must reproduce the above copyright
45  *    notice, this list of conditions and the following disclaimer in the
46  *    documentation and/or other materials provided with the distribution.
47  * 3. All advertising materials mentioning features or use of this software
48  *    must display the following acknowledgement:
49  *      This product includes software developed by Christopher G. Demetriou
50  *	for the NetBSD Project.
51  * 4. The name of the author may not be used to endorse or promote products
52  *    derived from this software without specific prior written permission
53  *
54  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
55  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
56  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
57  * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
58  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
59  * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
60  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
61  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
62  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
63  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
64  */
65 
66 /*
67  * PCI IDE controller driver.
68  *
69  * Author: Christopher G. Demetriou, March 2, 1998 (derived from NetBSD
70  * sys/dev/pci/ppb.c, revision 1.16).
71  *
72  * See "PCI IDE Controller Specification, Revision 1.0 3/4/94" and
73  * "Programming Interface for Bus Master IDE Controller, Revision 1.0
74  * 5/16/94" from the PCI SIG.
75  *
76  */
77 
78 #define DEBUG_DMA   0x01
79 #define DEBUG_XFERS  0x02
80 #define DEBUG_FUNCS  0x08
81 #define DEBUG_PROBE  0x10
82 
83 #ifdef WDCDEBUG
84 int wdcdebug_pciide_mask = 0;
85 #define WDCDEBUG_PRINT(args, level) \
86 	if (wdcdebug_pciide_mask & (level)) printf args
87 #else
88 #define WDCDEBUG_PRINT(args, level)
89 #endif
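/*
 * To get debug output, build the kernel with WDCDEBUG defined and set
 * wdcdebug_pciide_mask to a combination of the DEBUG_* flags above
 * (e.g. DEBUG_PROBE|DEBUG_DMA); it is a plain global, so it can also be
 * changed at run time.
 */
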
90 #include <sys/param.h>
91 #include <sys/systm.h>
92 #include <sys/device.h>
93 #include <sys/malloc.h>
94 
95 #include <uvm/uvm_extern.h>
96 
97 #include <machine/endian.h>
98 
99 #include <dev/pci/pcireg.h>
100 #include <dev/pci/pcivar.h>
101 #include <dev/pci/pcidevs.h>
102 #include <dev/pci/pciidereg.h>
103 #include <dev/pci/pciidevar.h>
104 #include <dev/pci/pciide_piix_reg.h>
105 #include <dev/pci/pciide_amd_reg.h>
106 #include <dev/pci/pciide_apollo_reg.h>
107 #include <dev/pci/pciide_cmd_reg.h>
108 #include <dev/pci/pciide_cy693_reg.h>
109 #include <dev/pci/pciide_sis_reg.h>
110 #include <dev/pci/pciide_acer_reg.h>
111 #include <dev/pci/pciide_pdc202xx_reg.h>
112 #include <dev/pci/pciide_opti_reg.h>
113 #include <dev/pci/pciide_hpt_reg.h>
114 #include <dev/pci/pciide_acard_reg.h>
115 #include <dev/pci/pciide_natsemi_reg.h>
116 #include <dev/pci/cy82c693var.h>
117 
118 #include <dev/ata/atavar.h>
119 #include <dev/ic/wdcreg.h>
120 #include <dev/ic/wdcvar.h>
121 
122 /* inlines for reading/writing 8-bit PCI registers */
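/*
 * pci_conf_read()/pci_conf_write() operate on aligned 32-bit words, so
 * byte-wide access is done by shifting and masking the requested byte
 * within its containing word (a read-modify-write cycle for stores).
 */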
123 static __inline u_int8_t pciide_pci_read __P((pci_chipset_tag_t, pcitag_t,
124 					      int));
125 static __inline void pciide_pci_write __P((pci_chipset_tag_t, pcitag_t,
126 					   int, u_int8_t));
127 
128 static __inline u_int8_t
129 pciide_pci_read(pc, pa, reg)
130 	pci_chipset_tag_t pc;
131 	pcitag_t pa;
132 	int reg;
133 {
134 
135 	return (pci_conf_read(pc, pa, (reg & ~0x03)) >>
136 	    ((reg & 0x03) * 8) & 0xff);
137 }
138 
139 static __inline void
140 pciide_pci_write(pc, pa, reg, val)
141 	pci_chipset_tag_t pc;
142 	pcitag_t pa;
143 	int reg;
144 	u_int8_t val;
145 {
146 	pcireg_t pcival;
147 
148 	pcival = pci_conf_read(pc, pa, (reg & ~0x03));
149 	pcival &= ~(0xff << ((reg & 0x03) * 8));
150 	pcival |= (val << ((reg & 0x03) * 8));
151 	pci_conf_write(pc, pa, (reg & ~0x03), pcival);
152 }
153 
154 struct pciide_softc {
155 	struct wdc_softc	sc_wdcdev;	/* common wdc definitions */
156 	pci_chipset_tag_t	sc_pc;		/* PCI registers info */
157 	pcitag_t		sc_tag;
158 	void			*sc_pci_ih;	/* PCI interrupt handle */
159 	int			sc_dma_ok;	/* bus-master DMA info */
160 	bus_space_tag_t		sc_dma_iot;
161 	bus_space_handle_t	sc_dma_ioh;
162 	bus_dma_tag_t		sc_dmat;
163 
164 	/* For Cypress */
165 	const struct cy82c693_handle *sc_cy_handle;
166 	int sc_cy_compatchan;
167 
168 	/* Chip description */
169 	const struct pciide_product_desc *sc_pp;
170 	/* common definitions */
171 	struct channel_softc *wdc_chanarray[PCIIDE_NUM_CHANNELS];
172 	/* internal bookkeeping */
173 	struct pciide_channel {			/* per-channel data */
174 		struct channel_softc wdc_channel; /* generic part */
175 		char		*name;
176 		int		hw_ok;		/* hardware mapped & OK? */
177 		int		compat;		/* is it compat? */
178 		int             dma_in_progress;
179 		void		*ih;		/* compat or pci handle */
180 		bus_space_handle_t ctl_baseioh;	/* ctrl regs blk, native mode */
181 		/* DMA tables and DMA map for xfer, for each drive */
182 		struct pciide_dma_maps {
183 			bus_dmamap_t    dmamap_table;
184 			struct idedma_table *dma_table;
185 			bus_dmamap_t    dmamap_xfer;
186 			int dma_flags;
187 		} dma_maps[2];
188 	} pciide_channels[PCIIDE_NUM_CHANNELS];
189 };
190 
191 void default_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
192 
193 void piix_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
194 void piix_setup_channel __P((struct channel_softc*));
195 void piix3_4_setup_channel __P((struct channel_softc*));
196 
197 static u_int32_t piix_setup_idetim_timings __P((u_int8_t, u_int8_t, u_int8_t));
198 static u_int32_t piix_setup_idetim_drvs __P((struct ata_drive_datas*));
199 static u_int32_t piix_setup_sidetim_timings __P((u_int8_t, u_int8_t, u_int8_t));
200 
201 void amd756_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
202 void amd756_setup_channel __P((struct channel_softc*));
203 
204 void apollo_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
205 void apollo_setup_channel __P((struct channel_softc*));
206 
207 void cmd_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
208 void cmd0643_9_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
209 void cmd0643_9_setup_channel __P((struct channel_softc*));
210 void cmd_channel_map __P((struct pci_attach_args *,
211 			struct pciide_softc *, int));
212 int  cmd_pci_intr __P((void *));
213 void cmd646_9_irqack __P((struct channel_softc *));
214 
215 void cy693_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
216 void cy693_setup_channel __P((struct channel_softc*));
217 
218 void sis_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
219 void sis_setup_channel __P((struct channel_softc*));
220 
221 void natsemi_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
222 void natsemi_setup_channel __P((struct channel_softc*));
223 int  natsemi_pci_intr __P((void *));
224 
225 void acer_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
226 void acer_setup_channel __P((struct channel_softc*));
227 int  acer_pci_intr __P((void *));
228 
229 void pdc202xx_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
230 void pdc202xx_setup_channel __P((struct channel_softc*));
231 int  pdc202xx_pci_intr __P((void *));
232 int  pdc20265_pci_intr __P((void *));
233 
234 void opti_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
235 void opti_setup_channel __P((struct channel_softc*));
236 
237 void hpt_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
238 void hpt_setup_channel __P((struct channel_softc*));
239 int  hpt_pci_intr __P((void *));
240 
241 void acard_chip_map __P((struct pciide_softc*, struct pci_attach_args*));
242 void acard_setup_channel __P((struct channel_softc*));
243 int  acard_pci_intr __P((void *));
244 
245 void pciide_channel_dma_setup __P((struct pciide_channel *));
246 int  pciide_dma_table_setup __P((struct pciide_softc*, int, int));
247 int  pciide_dma_init __P((void*, int, int, void *, size_t, int));
248 void pciide_dma_start __P((void*, int, int));
249 int  pciide_dma_finish __P((void*, int, int));
250 void pciide_irqack __P((struct channel_softc *));
251 void pciide_print_modes __P((struct pciide_channel *));
252 void pciide_print_channels __P((int, pcireg_t));
253 
254 struct pciide_product_desc {
255 	u_int32_t ide_product;
256 	u_short ide_flags;
257 	/* map and setup chip, probe drives */
258 	void (*chip_map) __P((struct pciide_softc*, struct pci_attach_args*));
259 };
260 
261 /* Flags for ide_flags */
262 #define IDE_PCI_CLASS_OVERRIDE	0x0001	/* accept even if class != pciide */
263 #define IDE_16BIT_IOSPACE	0x0002	/* I/O space BARS ignore upper word */
264 
265 /* Default product description for devices not known from this controller */
266 const struct pciide_product_desc default_product_desc = {
267 	0,				/* Generic PCI IDE controller */
268 	0,
269 	default_chip_map
270 };
271 
272 const struct pciide_product_desc pciide_intel_products[] =  {
273 	{ PCI_PRODUCT_INTEL_82092AA,	/* Intel 82092AA IDE */
274 	  0,
275 	  default_chip_map
276 	},
277 	{ PCI_PRODUCT_INTEL_82371FB_IDE, /* Intel 82371FB IDE (PIIX) */
278 	  0,
279 	  piix_chip_map
280 	},
281 	{ PCI_PRODUCT_INTEL_82371SB_IDE, /* Intel 82371SB IDE (PIIX3) */
282 	  0,
283 	  piix_chip_map
284 	},
285 	{ PCI_PRODUCT_INTEL_82371AB_IDE, /* Intel 82371AB IDE (PIIX4) */
286 	  0,
287 	  piix_chip_map
288 	},
289 	{ PCI_PRODUCT_INTEL_82440MX_IDE, /* Intel 82440MX IDE */
290 	  0,
291 	  piix_chip_map
292 	},
293 	{ PCI_PRODUCT_INTEL_82801AA_IDE, /* Intel 82801AA IDE (ICH) */
294 	  0,
295 	  piix_chip_map
296 	},
297 	{ PCI_PRODUCT_INTEL_82801AB_IDE, /* Intel 82801AB IDE (ICH0) */
298 	  0,
299 	  piix_chip_map
300 	},
301 	{ PCI_PRODUCT_INTEL_82801BAM_IDE, /* Intel 82801BAM IDE (ICH2) */
302 	  0,
303 	  piix_chip_map
304 	},
305 	{ PCI_PRODUCT_INTEL_82801BA_IDE, /* Intel 82801BA IDE (ICH2) */
306 	  0,
307 	  piix_chip_map
308 	},
309 };
310 
311 const struct pciide_product_desc pciide_amd_products[] =  {
312 	{ PCI_PRODUCT_AMD_PBC756_IDE,	/* AMD 756 */
313 	  0,
314 	  amd756_chip_map
315 	},
316 	{ PCI_PRODUCT_AMD_766_IDE, /* AMD 766 */
317 	  0,
318 	  amd756_chip_map
319 	},
320 };
321 
322 #ifdef notyet
323 const struct pciide_product_desc pciide_opti_products[] = {
324 
325 	{ PCI_PRODUCT_OPTI_82C621,
326 	  0,
327 	  opti_chip_map
328 	},
329 	{ PCI_PRODUCT_OPTI_82C568,
330 	  0,
331 	  opti_chip_map
332 	},
333 	{ PCI_PRODUCT_OPTI_82D568,
334 	  0,
335 	  opti_chip_map
336 	},
337 };
338 #endif
339 
340 const struct pciide_product_desc pciide_cmd_products[] =  {
341 	{ PCI_PRODUCT_CMDTECH_640,	/* CMD Technology PCI0640 */
342 	  0,
343 	  cmd_chip_map
344 	},
345 	{ PCI_PRODUCT_CMDTECH_643,	/* CMD Technology PCI0643 */
346 	  0,
347 	  cmd0643_9_chip_map
348 	},
349 	{ PCI_PRODUCT_CMDTECH_646,	/* CMD Technology PCI0646 */
350 	  0,
351 	  cmd0643_9_chip_map
352 	},
353 	{ PCI_PRODUCT_CMDTECH_648,	/* CMD Technology PCI0648 */
354 	  IDE_PCI_CLASS_OVERRIDE,
355 	  cmd0643_9_chip_map
356 	},
357 	{ PCI_PRODUCT_CMDTECH_649,	/* CMD Technology PCI0649 */
358 	  IDE_PCI_CLASS_OVERRIDE,
359 	  cmd0643_9_chip_map
360 	}
361 };
362 
363 const struct pciide_product_desc pciide_via_products[] =  {
364 	{ PCI_PRODUCT_VIATECH_VT82C416, /* VIA VT82C416 IDE */
365 	  0,
366 	  apollo_chip_map
367 	},
368 	{ PCI_PRODUCT_VIATECH_VT82C571, /* VIA VT82C571 IDE */
369 	  0,
370 	  apollo_chip_map
371 	}
372 };
373 
374 const struct pciide_product_desc pciide_cypress_products[] =  {
375 	{ PCI_PRODUCT_CONTAQ_82C693,	/* Contaq CY82C693 IDE */
376 	  IDE_16BIT_IOSPACE,
377 	  cy693_chip_map
378 	}
379 };
380 
381 const struct pciide_product_desc pciide_sis_products[] =  {
382 	{ PCI_PRODUCT_SIS_5513,		/* SIS 5513 EIDE */
383 	  0,
384 	  sis_chip_map
385 	}
386 };
387 
388 const struct pciide_product_desc pciide_natsemi_products[] =  {
389 	{ PCI_PRODUCT_NS_PC87415,	/* National Semi PC87415 IDE */
390 	  0,
391 	  natsemi_chip_map
392 	}
393 };
394 
395 const struct pciide_product_desc pciide_acer_products[] =  {
396 	{ PCI_PRODUCT_ALI_M5229,	/* Acer Labs M5229 UDMA IDE */
397 	  0,
398 	  acer_chip_map
399 	}
400 };
401 
402 const struct pciide_product_desc pciide_triones_products[] =  {
403 	{ PCI_PRODUCT_TRIONES_HPT366,	/* Highpoint HPT36x/37x IDE */
404 	  IDE_PCI_CLASS_OVERRIDE,
405 	  hpt_chip_map,
406 	}
407 };
408 
409 const struct pciide_product_desc pciide_promise_products[] =  {
410 	{ PCI_PRODUCT_PROMISE_PDC20246,
411 	  IDE_PCI_CLASS_OVERRIDE,
412 	  pdc202xx_chip_map,
413 	},
414 	{ PCI_PRODUCT_PROMISE_PDC20262,
415 	  IDE_PCI_CLASS_OVERRIDE,
416 	  pdc202xx_chip_map,
417 	},
418 	{ PCI_PRODUCT_PROMISE_PDC20265,
419 	  IDE_PCI_CLASS_OVERRIDE,
420 	  pdc202xx_chip_map,
421 	},
422 	{ PCI_PRODUCT_PROMISE_PDC20267,
423 	  IDE_PCI_CLASS_OVERRIDE,
424 	  pdc202xx_chip_map,
425 	},
426 	{ PCI_PRODUCT_PROMISE_PDC20268,
427 	  IDE_PCI_CLASS_OVERRIDE,
428 	  pdc202xx_chip_map,
429 	}
430 };
431 
432 const struct pciide_product_desc pciide_acard_products[] =  {
433 	{ PCI_PRODUCT_ACARD_ATP850U,	/* Acard ATP850U Ultra33 Controller */
434 	  IDE_PCI_CLASS_OVERRIDE,
435 	  acard_chip_map,
436 	},
437 	{ PCI_PRODUCT_ACARD_ATP860,	/* Acard ATP860 Ultra66 Controller */
438 	  IDE_PCI_CLASS_OVERRIDE,
439 	  acard_chip_map,
440 	},
441 	{ PCI_PRODUCT_ACARD_ATP860A,	/* Acard ATP860-A Ultra66 Controller */
442 	  IDE_PCI_CLASS_OVERRIDE,
443 	  acard_chip_map,
444 	}
445 };
446 
447 struct pciide_vendor_desc {
448 	u_int32_t ide_vendor;
449 	const struct pciide_product_desc *ide_products;
450 	int ide_nproducts;
451 };
452 
453 const struct pciide_vendor_desc pciide_vendors[] = {
454 	{ PCI_VENDOR_INTEL, pciide_intel_products,
455 	  sizeof(pciide_intel_products)/sizeof(pciide_intel_products[0]) },
456 	{ PCI_VENDOR_AMD, pciide_amd_products,
457 	  sizeof(pciide_amd_products)/sizeof(pciide_amd_products[0]) },
458 #ifdef notyet
459 	{ PCI_VENDOR_OPTI, pciide_opti_products,
460 	  sizeof(pciide_opti_products)/sizeof(pciide_opti_products[0]) },
461 #endif
462 	{ PCI_VENDOR_CMDTECH, pciide_cmd_products,
463 	  sizeof(pciide_cmd_products)/sizeof(pciide_cmd_products[0]) },
464 	{ PCI_VENDOR_VIATECH, pciide_via_products,
465 	  sizeof(pciide_via_products)/sizeof(pciide_via_products[0]) },
466 	{ PCI_VENDOR_CONTAQ, pciide_cypress_products,
467 	  sizeof(pciide_cypress_products)/sizeof(pciide_cypress_products[0]) },
468 	{ PCI_VENDOR_SIS, pciide_sis_products,
469 	  sizeof(pciide_sis_products)/sizeof(pciide_sis_products[0]) },
470 	{ PCI_VENDOR_NS, pciide_natsemi_products,
471 	  sizeof(pciide_natsemi_products)/sizeof(pciide_natsemi_products[0]) },
472 	{ PCI_VENDOR_ALI, pciide_acer_products,
473 	  sizeof(pciide_acer_products)/sizeof(pciide_acer_products[0]) },
474 	{ PCI_VENDOR_TRIONES, pciide_triones_products,
475 	  sizeof(pciide_triones_products)/sizeof(pciide_triones_products[0]) },
476 	{ PCI_VENDOR_ACARD, pciide_acard_products,
477 	  sizeof(pciide_acard_products)/sizeof(pciide_acard_products[0]) },
478 	{ PCI_VENDOR_PROMISE, pciide_promise_products,
479 	  sizeof(pciide_promise_products)/sizeof(pciide_promise_products[0]) }
480 };
481 
482 /* options passed via the 'flags' config keyword */
483 #define PCIIDE_OPTIONS_DMA	0x01
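/*
 * PCIIDE_OPTIONS_DMA lets the kernel config force bus-master DMA on an
 * otherwise unknown controller handled by default_chip_map(), e.g.
 * something like "pciide* at pci? dev ? function ? flags 0x0001".
 */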
484 
485 #ifndef __OpenBSD__
486 int	pciide_match __P((struct device *, struct cfdata *, void *));
487 #else
488 int	pciide_match __P((struct device *, void *, void *));
489 #endif
490 void	pciide_attach __P((struct device *, struct device *, void *));
491 
492 struct cfattach pciide_ca = {
493 	sizeof(struct pciide_softc), pciide_match, pciide_attach
494 };
495 
496 #ifdef __OpenBSD__
497 struct cfdriver pciide_cd = {
498 	NULL, "pciide", DV_DULL
499 };
500 #endif
501 int	pciide_chipen __P((struct pciide_softc *, struct pci_attach_args *));
502 int	pciide_mapregs_compat __P(( struct pci_attach_args *,
503 	    struct pciide_channel *, int, bus_size_t *, bus_size_t*));
504 int	pciide_mapregs_native __P((struct pci_attach_args *,
505 	    struct pciide_channel *, bus_size_t *, bus_size_t *,
506 	    int (*pci_intr) __P((void *))));
507 void	pciide_mapreg_dma __P((struct pciide_softc *,
508 	    struct pci_attach_args *));
509 int	pciide_chansetup __P((struct pciide_softc *, int, pcireg_t));
510 void	pciide_mapchan __P((struct pci_attach_args *,
511 	    struct pciide_channel *, pcireg_t, bus_size_t *, bus_size_t *,
512 	    int (*pci_intr) __P((void *))));
513 int	pciide_chan_candisable __P((struct pciide_channel *));
514 void	pciide_map_compat_intr __P(( struct pci_attach_args *,
515 	    struct pciide_channel *, int, int));
516 void	pciide_unmap_compat_intr __P(( struct pci_attach_args *,
517 	    struct pciide_channel *, int, int));
518 int	pciide_compat_intr __P((void *));
519 int	pciide_pci_intr __P((void *));
520 int     pciide_intr_flag(struct pciide_channel *);
521 
522 const struct pciide_product_desc* pciide_lookup_product __P((u_int32_t));
523 
524 const struct pciide_product_desc *
525 pciide_lookup_product(id)
526 	u_int32_t id;
527 {
528 	const struct pciide_product_desc *pp;
529 	const struct pciide_vendor_desc *vp;
530 	int i;
531 
532 	for (i = 0, vp = pciide_vendors;
533 	    i < sizeof(pciide_vendors)/sizeof(pciide_vendors[0]);
534 	    vp++, i++)
535 		if (PCI_VENDOR(id) == vp->ide_vendor)
536 			break;
537 
538 	if (i == sizeof(pciide_vendors)/sizeof(pciide_vendors[0]))
539 		return NULL;
540 
541 	for (pp = vp->ide_products, i = 0; i < vp->ide_nproducts; pp++, i++)
542 		if (PCI_PRODUCT(id) == pp->ide_product)
543 			break;
544 
545 	if (i == vp->ide_nproducts)
546 		return NULL;
547 	return pp;
548 }
549 
550 int
551 pciide_match(parent, match, aux)
552 	struct device *parent;
553 #ifdef __OpenBSD__
554 	void *match;
555 #else
556 	struct cfdata *match;
557 #endif
558 	void *aux;
559 {
560 	struct pci_attach_args *pa = aux;
561 	const struct pciide_product_desc *pp;
562 
563 	/*
564  	 * Some IDE controllers have severe bugs when used in PCI mode.
565 	 * We punt and attach them to the ISA bus instead.
566 	 */
567 	if (PCI_VENDOR(pa->pa_id) == PCI_VENDOR_PCTECH &&
568 	    PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_PCTECH_RZ1000)
569 		return (0);
570 
571 	/*
572 	 * Check the ID register to see that it's a PCI IDE controller.
573 	 * If it is, we assume that we can deal with it; it _should_
574 	 * work in a standardized way...
575 	 */
576 	if (PCI_CLASS(pa->pa_class) == PCI_CLASS_MASS_STORAGE &&
577 	    PCI_SUBCLASS(pa->pa_class) == PCI_SUBCLASS_MASS_STORAGE_IDE) {
578 		return (1);
579 	}
580 
581 	/*
582 	 * Some controllers (e.g. the Promise Ultra-33) don't claim to be PCI
583 	 * IDE controllers. Let's see if we can deal with them anyway.
584 	 */
585 	pp = pciide_lookup_product(pa->pa_id);
586 	if (pp  && (pp->ide_flags & IDE_PCI_CLASS_OVERRIDE)) {
587 		return (1);
588 	}
589 
590 	return (0);
591 }
592 
593 void
594 pciide_attach(parent, self, aux)
595 	struct device *parent, *self;
596 	void *aux;
597 {
598 	struct pci_attach_args *pa = aux;
599 	pci_chipset_tag_t pc = pa->pa_pc;
600 	pcitag_t tag = pa->pa_tag;
601 	struct pciide_softc *sc = (struct pciide_softc *)self;
602 	pcireg_t csr;
603 	char devinfo[256];
604 
605 	sc->sc_pp = pciide_lookup_product(pa->pa_id);
606 	if (sc->sc_pp == NULL) {
607 		sc->sc_pp = &default_product_desc;
608 		pci_devinfo(pa->pa_id, pa->pa_class, 0, devinfo);
609 	}
610 
611 	sc->sc_pc = pa->pa_pc;
612 	sc->sc_tag = pa->pa_tag;
613 
614 #ifdef WDCDEBUG
615 	if (wdcdebug_pciide_mask & DEBUG_PROBE)
616 		printf(" sc_pc %p, sc_tag %p\n", sc->sc_pc, sc->sc_tag);
617 #endif
618 
619 	sc->sc_pp->chip_map(sc, pa);
620 
621 	if (sc->sc_dma_ok) {
622 		csr = pci_conf_read(pc, tag, PCI_COMMAND_STATUS_REG);
623 		csr |= PCI_COMMAND_MASTER_ENABLE;
624 		pci_conf_write(pc, tag, PCI_COMMAND_STATUS_REG, csr);
625 	}
626 
627 	WDCDEBUG_PRINT(("pciide: command/status register=0x%x\n",
628 	    pci_conf_read(pc, tag, PCI_COMMAND_STATUS_REG)), DEBUG_PROBE);
629 }
630 
631 /* Tell whether the chip is enabled or not. */
632 int
633 pciide_chipen(sc, pa)
634 	struct pciide_softc *sc;
635 	struct pci_attach_args *pa;
636 {
637 	pcireg_t csr;
638 	if ((pa->pa_flags & PCI_FLAGS_IO_ENABLED) == 0) {
639 		csr = pci_conf_read(sc->sc_pc, sc->sc_tag,
640 		    PCI_COMMAND_STATUS_REG);
641 		printf("%s: device disabled (at %s)\n",
642 		    sc->sc_wdcdev.sc_dev.dv_xname,
643 		    (csr & PCI_COMMAND_IO_ENABLE) == 0 ?
644 		    "device" : "bridge");
645 		return 0;
646 	}
647 	return 1;
648 }
649 
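/*
 * Map a compatibility channel: the command and control register blocks
 * live at the fixed legacy addresses (0x1f0/0x3f6 for channel 0,
 * 0x170/0x376 for channel 1, via the PCIIDE_COMPAT_*_BASE macros)
 * rather than in the device's BARs.
 */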
650 int
651 pciide_mapregs_compat(pa, cp, compatchan, cmdsizep, ctlsizep)
652 	struct pci_attach_args *pa;
653 	struct pciide_channel *cp;
654 	int compatchan;
655 	bus_size_t *cmdsizep, *ctlsizep;
656 {
657 	struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
658 	struct channel_softc *wdc_cp = &cp->wdc_channel;
659 
660 	cp->compat = 1;
661 	*cmdsizep = PCIIDE_COMPAT_CMD_SIZE;
662 	*ctlsizep = PCIIDE_COMPAT_CTL_SIZE;
663 
664 	wdc_cp->cmd_iot = pa->pa_iot;
665 
666 	if (bus_space_map(wdc_cp->cmd_iot, PCIIDE_COMPAT_CMD_BASE(compatchan),
667 	    PCIIDE_COMPAT_CMD_SIZE, 0, &wdc_cp->cmd_ioh) != 0) {
668 		printf("%s: couldn't map %s cmd regs\n",
669 		    sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
670 		return (0);
671 	}
672 
673 	wdc_cp->ctl_iot = pa->pa_iot;
674 
675 	if (bus_space_map(wdc_cp->ctl_iot, PCIIDE_COMPAT_CTL_BASE(compatchan),
676 	    PCIIDE_COMPAT_CTL_SIZE, 0, &wdc_cp->ctl_ioh) != 0) {
677 		printf("%s: couldn't map %s ctl regs\n",
678 		    sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
679 		bus_space_unmap(wdc_cp->cmd_iot, wdc_cp->cmd_ioh,
680 		    PCIIDE_COMPAT_CMD_SIZE);
681 		return (0);
682 	}
683 
684 	return (1);
685 }
686 
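/*
 * Map a native-PCI channel: the command and control register blocks come
 * from the channel's PCI BARs, and a single shared PCI interrupt is
 * established on the first call and reused for every channel of the
 * controller.
 */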
687 int
688 pciide_mapregs_native(pa, cp, cmdsizep, ctlsizep, pci_intr)
689 	struct pci_attach_args * pa;
690 	struct pciide_channel *cp;
691 	bus_size_t *cmdsizep, *ctlsizep;
692 	int (*pci_intr) __P((void *));
693 {
694 	struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
695 	struct channel_softc *wdc_cp = &cp->wdc_channel;
696 	const char *intrstr;
697 	pci_intr_handle_t intrhandle;
698 
699 	cp->compat = 0;
700 
701 	if (sc->sc_pci_ih == NULL) {
702 		if (pci_intr_map(pa, &intrhandle) != 0) {
703 			printf("%s: couldn't map native-PCI interrupt\n",
704 			    sc->sc_wdcdev.sc_dev.dv_xname);
705 			return 0;
706 		}
707 		intrstr = pci_intr_string(pa->pa_pc, intrhandle);
708 #ifdef __OpenBSD__
709 		sc->sc_pci_ih = pci_intr_establish(pa->pa_pc,
710 		    intrhandle, IPL_BIO, pci_intr, sc,
711 		    sc->sc_wdcdev.sc_dev.dv_xname);
712 #else
713 		sc->sc_pci_ih = pci_intr_establish(pa->pa_pc,
714 		    intrhandle, IPL_BIO, pci_intr, sc);
715 #endif
716 		if (sc->sc_pci_ih != NULL) {
717 			printf("%s: using %s for native-PCI interrupt\n",
718 			    sc->sc_wdcdev.sc_dev.dv_xname,
719 			    intrstr ? intrstr : "unknown interrupt");
720 		} else {
721 			printf("%s: couldn't establish native-PCI interrupt",
722 			    sc->sc_wdcdev.sc_dev.dv_xname);
723 			if (intrstr != NULL)
724 				printf(" at %s", intrstr);
725 			printf("\n");
726 			return 0;
727 		}
728 	}
729 	cp->ih = sc->sc_pci_ih;
730 	if (pci_mapreg_map(pa, PCIIDE_REG_CMD_BASE(wdc_cp->channel),
731 	    PCI_MAPREG_TYPE_IO, 0,
732 	    &wdc_cp->cmd_iot, &wdc_cp->cmd_ioh, NULL, cmdsizep, 0) != 0) {
733 		printf("%s: couldn't map %s cmd regs\n",
734 		    sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
735 		return 0;
736 	}
737 
738 	if (pci_mapreg_map(pa, PCIIDE_REG_CTL_BASE(wdc_cp->channel),
739 	    PCI_MAPREG_TYPE_IO, 0,
740 	    &wdc_cp->ctl_iot, &cp->ctl_baseioh, NULL, ctlsizep, 0) != 0) {
741 		printf("%s: couldn't map %s ctl regs\n",
742 		    sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
743 		bus_space_unmap(wdc_cp->cmd_iot, wdc_cp->cmd_ioh, *cmdsizep);
744 		return 0;
745 	}
746 	/*
747 	 * In native mode, 4 bytes of I/O space are mapped for the control
748 	 * register, the control register is at offset 2. Pass the generic
749 	 * code a handle for only one byte at the right offset.
750 	 */
751 	if (bus_space_subregion(wdc_cp->ctl_iot, cp->ctl_baseioh, 2, 1,
752 	    &wdc_cp->ctl_ioh) != 0) {
753 		printf("%s: unable to subregion %s channel ctl regs\n",
754 		    sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
755 		bus_space_unmap(wdc_cp->cmd_iot, wdc_cp->cmd_ioh, *cmdsizep);
756 		bus_space_unmap(wdc_cp->cmd_iot, cp->ctl_baseioh, *ctlsizep);
757 		return 0;
758 	}
759 	return (1);
760 }
761 
762 void
763 pciide_mapreg_dma(sc, pa)
764 	struct pciide_softc *sc;
765 	struct pci_attach_args *pa;
766 {
767 	pcireg_t maptype;
768 	bus_addr_t addr;
769 
770 	/*
771 	 * Map DMA registers
772 	 *
773 	 * Note that sc_dma_ok is the right variable to test to see if
774 	 * DMA can be done.  If the interface doesn't support DMA,
775 	 * sc_dma_ok will never be non-zero.  If the DMA regs couldn't
776 	 * be mapped, it'll be zero.  I.e., sc_dma_ok will only be
777 	 * non-zero if the interface supports DMA and the registers
778 	 * could be mapped.
779 	 *
780 	 * XXX Note that despite the fact that the Bus Master IDE specs
781 	 * XXX say that "The bus master IDE function uses 16 bytes of IO
782 	 * XXX space," some controllers (at least the United
783 	 * XXX Microelectronics UM8886BF) place it in memory space.
784 	 */
785 
786 	maptype = pci_mapreg_type(pa->pa_pc, pa->pa_tag,
787 	    PCIIDE_REG_BUS_MASTER_DMA);
788 
789 	switch (maptype) {
790 	case PCI_MAPREG_TYPE_IO:
791 		sc->sc_dma_ok = (pci_mapreg_info(pa->pa_pc, pa->pa_tag,
792 		    PCIIDE_REG_BUS_MASTER_DMA, PCI_MAPREG_TYPE_IO,
793 		    &addr, NULL, NULL) == 0);
794 		if (sc->sc_dma_ok == 0) {
795 			printf(", unused (couldn't query registers)");
796 			break;
797 		}
798 		if ((sc->sc_pp->ide_flags & IDE_16BIT_IOSPACE)
799 		    && addr >= 0x10000) {
800 			sc->sc_dma_ok = 0;
801 			printf(", unused (registers at unsafe address %#lx)", addr);
802 			break;
803 		}
804 		/* FALLTHROUGH */
805 
806 	case PCI_MAPREG_MEM_TYPE_32BIT:
807 		sc->sc_dma_ok = (pci_mapreg_map(pa,
808 		    PCIIDE_REG_BUS_MASTER_DMA, maptype, 0,
809 		    &sc->sc_dma_iot, &sc->sc_dma_ioh, NULL, NULL, 0) == 0);
810 		sc->sc_dmat = pa->pa_dmat;
811 		if (sc->sc_dma_ok == 0) {
812 			printf(", unused (couldn't map registers)");
813 		} else {
814 			sc->sc_wdcdev.dma_arg = sc;
815 			sc->sc_wdcdev.dma_init = pciide_dma_init;
816 			sc->sc_wdcdev.dma_start = pciide_dma_start;
817 			sc->sc_wdcdev.dma_finish = pciide_dma_finish;
818 		}
819 		break;
820 
821 	default:
822 		sc->sc_dma_ok = 0;
823 		printf(", (unsupported maptype 0x%x)", maptype);
824 		break;
825 	}
826 }
827 
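/*
 * Decide whether a pending interrupt can be ours by looking at the
 * bus-master status register: returns 1 if a DMA interrupt is flagged
 * for this channel, 0 if DMA is in progress but no interrupt showed up
 * (not for us), and -1 if no DMA is in progress so the register tells
 * us nothing.
 */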
828 int
829 pciide_intr_flag(struct pciide_channel *cp)
830 {
831 	struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
832 
833 	if (cp->dma_in_progress) {
834 		int retry = 10;
835 		int status;
836 
837 		/* Check the status register */
838 		for (retry = 10; retry > 0; retry--) {
839 			status = bus_space_read_1(sc->sc_dma_iot,
840 			    sc->sc_dma_ioh,
841 			    IDEDMA_CTL + IDEDMA_SCH_OFFSET *
842 			    cp->wdc_channel.channel);
843 			if (status & IDEDMA_CTL_INTR) {
844 				break;
845 			}
846 			DELAY(5);
847 		}
848 
849 		/* Not for us.  */
850 		if (retry == 0)
851 			return (0);
852 
853 		return (1);
854 	}
855 
856 	return (-1);
857 }
858 
859 int
860 pciide_compat_intr(arg)
861 	void *arg;
862 {
863 	struct pciide_channel *cp = arg;
864 
865 	if (pciide_intr_flag(cp) == 0)
866 		return 0;
867 
868 #ifdef DIAGNOSTIC
869 	/* should only be called for a compat channel */
870 	if (cp->compat == 0)
871 		panic("pciide compat intr called for non-compat chan %p\n", cp);
872 #endif
873 	return (wdcintr(&cp->wdc_channel));
874 }
875 
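/*
 * Native-PCI interrupt handler, shared by all channels of a controller:
 * poll every non-compat channel that is waiting for an interrupt and
 * hand it to wdcintr().
 */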
876 int
877 pciide_pci_intr(arg)
878 	void *arg;
879 {
880 	struct pciide_softc *sc = arg;
881 	struct pciide_channel *cp;
882 	struct channel_softc *wdc_cp;
883 	int i, rv, crv;
884 
885 	rv = 0;
886 	for (i = 0; i < sc->sc_wdcdev.nchannels; i++) {
887 		cp = &sc->pciide_channels[i];
888 		wdc_cp = &cp->wdc_channel;
889 
890 		/* If a compat channel, skip. */
891 		if (cp->compat)
892 			continue;
893 		/* if this channel is not waiting for an interrupt, skip */
894 		if ((wdc_cp->ch_flags & WDCF_IRQ_WAIT) == 0)
895 			continue;
896 
897 		if (pciide_intr_flag(cp) == 0)
898 			continue;
899 
900 		crv = wdcintr(wdc_cp);
901 		if (crv == 0)
902 			;		/* leave rv alone */
903 		else if (crv == 1)
904 			rv = 1;		/* claim the intr */
905 		else if (rv == 0)	/* crv should be -1 in this case */
906 			rv = crv;	/* if we've done no better, take it */
907 	}
908 	return (rv);
909 }
910 
911 void
912 pciide_channel_dma_setup(cp)
913 	struct pciide_channel *cp;
914 {
915 	int drive;
916 	struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
917 	struct ata_drive_datas *drvp;
918 
919 	for (drive = 0; drive < 2; drive++) {
920 		drvp = &cp->wdc_channel.ch_drive[drive];
921 		/* If no drive, skip */
922 		if ((drvp->drive_flags & DRIVE) == 0)
923 			continue;
924 		/* setup DMA if needed */
925 		if (((drvp->drive_flags & DRIVE_DMA) == 0 &&
926 		    (drvp->drive_flags & DRIVE_UDMA) == 0) ||
927 		    sc->sc_dma_ok == 0) {
928 			drvp->drive_flags &= ~(DRIVE_DMA | DRIVE_UDMA);
929 			continue;
930 		}
931 		if (pciide_dma_table_setup(sc, cp->wdc_channel.channel, drive)
932 		    != 0) {
933 			/* Abort DMA setup */
934 			drvp->drive_flags &= ~(DRIVE_DMA | DRIVE_UDMA);
935 			continue;
936 		}
937 	}
938 }
939 
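/*
 * Allocate and map the per-drive descriptor table (NIDEDMA_TABLES
 * entries, IDEDMA_TBL_ALIGN aligned), plus the DMA maps used to load
 * the table itself and the data buffers for transfers.
 */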
940 int
941 pciide_dma_table_setup(sc, channel, drive)
942 	struct pciide_softc *sc;
943 	int channel, drive;
944 {
945 	bus_dma_segment_t seg;
946 	int error, rseg;
947 	const bus_size_t dma_table_size =
948 	    sizeof(struct idedma_table) * NIDEDMA_TABLES;
949 	struct pciide_dma_maps *dma_maps =
950 	    &sc->pciide_channels[channel].dma_maps[drive];
951 
952 	/* If table was already allocated, just return */
953 	if (dma_maps->dma_table)
954 		return 0;
955 
956 	/* Allocate memory for the DMA tables and map it */
957 	if ((error = bus_dmamem_alloc(sc->sc_dmat, dma_table_size,
958 	    IDEDMA_TBL_ALIGN, IDEDMA_TBL_ALIGN, &seg, 1, &rseg,
959 	    BUS_DMA_NOWAIT)) != 0) {
960 		printf("%s:%d: unable to allocate table DMA for "
961 		    "drive %d, error=%d\n", sc->sc_wdcdev.sc_dev.dv_xname,
962 		    channel, drive, error);
963 		return error;
964 	}
965 
966 	if ((error = bus_dmamem_map(sc->sc_dmat, &seg, rseg,
967 	    dma_table_size,
968 	    (caddr_t *)&dma_maps->dma_table,
969 	    BUS_DMA_NOWAIT|BUS_DMA_COHERENT)) != 0) {
970 		printf("%s:%d: unable to map table DMA for "
971 		    "drive %d, error=%d\n", sc->sc_wdcdev.sc_dev.dv_xname,
972 		    channel, drive, error);
973 		return error;
974 	}
975 
976 	WDCDEBUG_PRINT(("pciide_dma_table_setup: table at %p len %ld, "
977 	    "phy 0x%lx\n", dma_maps->dma_table, dma_table_size,
978 	    seg.ds_addr), DEBUG_PROBE);
979 
980 	/* Create and load table DMA map for this disk */
981 	if ((error = bus_dmamap_create(sc->sc_dmat, dma_table_size,
982 	    1, dma_table_size, IDEDMA_TBL_ALIGN, BUS_DMA_NOWAIT,
983 	    &dma_maps->dmamap_table)) != 0) {
984 		printf("%s:%d: unable to create table DMA map for "
985 		    "drive %d, error=%d\n", sc->sc_wdcdev.sc_dev.dv_xname,
986 		    channel, drive, error);
987 		return error;
988 	}
989 	if ((error = bus_dmamap_load(sc->sc_dmat,
990 	    dma_maps->dmamap_table,
991 	    dma_maps->dma_table,
992 	    dma_table_size, NULL, BUS_DMA_NOWAIT)) != 0) {
993 		printf("%s:%d: unable to load table DMA map for "
994 		    "drive %d, error=%d\n", sc->sc_wdcdev.sc_dev.dv_xname,
995 		    channel, drive, error);
996 		return error;
997 	}
998 	WDCDEBUG_PRINT(("pciide_dma_table_setup: phy addr of table 0x%lx\n",
999 	    dma_maps->dmamap_table->dm_segs[0].ds_addr), DEBUG_PROBE);
1000 	/* Create a xfer DMA map for this drive */
1001 	if ((error = bus_dmamap_create(sc->sc_dmat, IDEDMA_BYTE_COUNT_MAX,
1002 	    NIDEDMA_TABLES, IDEDMA_BYTE_COUNT_MAX, IDEDMA_BYTE_COUNT_ALIGN,
1003 	    BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW,
1004 	    &dma_maps->dmamap_xfer)) != 0) {
1005 		printf("%s:%d: unable to create xfer DMA map for "
1006 		    "drive %d, error=%d\n", sc->sc_wdcdev.sc_dev.dv_xname,
1007 		    channel, drive, error);
1008 		return error;
1009 	}
1010 	return 0;
1011 }
1012 
1013 int
1014 pciide_dma_init(v, channel, drive, databuf, datalen, flags)
1015 	void *v;
1016 	int channel, drive;
1017 	void *databuf;
1018 	size_t datalen;
1019 	int flags;
1020 {
1021 	struct pciide_softc *sc = v;
1022 	int error, seg;
1023 	struct pciide_dma_maps *dma_maps =
1024 	    &sc->pciide_channels[channel].dma_maps[drive];
1025 #ifndef BUS_DMA_RAW
1026 #define BUS_DMA_RAW 0
1027 #endif
1028 
1029 	error = bus_dmamap_load(sc->sc_dmat,
1030 	    dma_maps->dmamap_xfer,
1031 	    databuf, datalen, NULL, BUS_DMA_NOWAIT|BUS_DMA_RAW);
1032 	if (error) {
1033 		printf("%s:%d: unable to load xfer DMA map for "
1034 		    "drive %d, error=%d\n", sc->sc_wdcdev.sc_dev.dv_xname,
1035 		    channel, drive, error);
1036 		return error;
1037 	}
1038 
1039 	bus_dmamap_sync(sc->sc_dmat, dma_maps->dmamap_xfer, 0,
1040 	    dma_maps->dmamap_xfer->dm_mapsize,
1041 	    (flags & WDC_DMA_READ) ?
1042 	    BUS_DMASYNC_PREREAD : BUS_DMASYNC_PREWRITE);
1043 
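	/*
	 * Fill the descriptor table from the loaded xfer map: each entry
	 * carries a segment's 32-bit physical base address and byte count
	 * (little-endian); the EOT bit is set in the last entry below.
	 */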
1044 	for (seg = 0; seg < dma_maps->dmamap_xfer->dm_nsegs; seg++) {
1045 #ifdef DIAGNOSTIC
1046 		/* A segment must not cross a 64k boundary */
1047 		{
1048 		u_long phys = dma_maps->dmamap_xfer->dm_segs[seg].ds_addr;
1049 		u_long len = dma_maps->dmamap_xfer->dm_segs[seg].ds_len;
1050 		if ((phys & ~IDEDMA_BYTE_COUNT_MASK) !=
1051 		    ((phys + len - 1) & ~IDEDMA_BYTE_COUNT_MASK)) {
1052 			printf("pciide_dma: segment %d physical addr 0x%lx"
1053 			    " len 0x%lx not properly aligned\n",
1054 			    seg, phys, len);
1055 			panic("pciide_dma: buf align");
1056 		}
1057 		}
1058 #endif
1059 		dma_maps->dma_table[seg].base_addr =
1060 		    htole32(dma_maps->dmamap_xfer->dm_segs[seg].ds_addr);
1061 		dma_maps->dma_table[seg].byte_count =
1062 		    htole32(dma_maps->dmamap_xfer->dm_segs[seg].ds_len &
1063 		    IDEDMA_BYTE_COUNT_MASK);
1064 		WDCDEBUG_PRINT(("\t seg %d len %d addr 0x%x\n",
1065 		   seg, letoh32(dma_maps->dma_table[seg].byte_count),
1066 		   letoh32(dma_maps->dma_table[seg].base_addr)), DEBUG_DMA);
1067 
1068 	}
1069 	dma_maps->dma_table[dma_maps->dmamap_xfer->dm_nsegs - 1].byte_count |=
1070 	    htole32(IDEDMA_BYTE_COUNT_EOT);
1071 
1072 	bus_dmamap_sync(sc->sc_dmat, dma_maps->dmamap_table, 0,
1073 	    dma_maps->dmamap_table->dm_mapsize,
1074 	    BUS_DMASYNC_PREWRITE);
1075 
1076 	/* Maps are ready. Start DMA function */
1077 #ifdef DIAGNOSTIC
1078 	if (dma_maps->dmamap_table->dm_segs[0].ds_addr & ~IDEDMA_TBL_MASK) {
1079 		printf("pciide_dma_init: addr 0x%lx not properly aligned\n",
1080 		    dma_maps->dmamap_table->dm_segs[0].ds_addr);
1081 		panic("pciide_dma_init: table align");
1082 	}
1083 #endif
1084 
1085 	/* Clear status bits */
1086 	bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
1087 	    IDEDMA_CTL + IDEDMA_SCH_OFFSET * channel,
1088 	    bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh,
1089 		IDEDMA_CTL + IDEDMA_SCH_OFFSET * channel));
1090 	/* Write table addr */
1091 	bus_space_write_4(sc->sc_dma_iot, sc->sc_dma_ioh,
1092 	    IDEDMA_TBL + IDEDMA_SCH_OFFSET * channel,
1093 	    dma_maps->dmamap_table->dm_segs[0].ds_addr);
1094 	/* set read/write */
1095 	bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
1096 	    IDEDMA_CMD + IDEDMA_SCH_OFFSET * channel,
1097 	    (flags & WDC_DMA_READ) ? IDEDMA_CMD_WRITE: 0);
1098 	/* remember flags */
1099 	dma_maps->dma_flags = flags;
1100 	return 0;
1101 }
1102 
1103 void
1104 pciide_dma_start(v, channel, drive)
1105 	void *v;
1106 	int channel, drive;
1107 {
1108 	struct pciide_softc *sc = v;
1109 
1110 	WDCDEBUG_PRINT(("pciide_dma_start\n"),DEBUG_XFERS);
1111 	bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
1112 	    IDEDMA_CMD + IDEDMA_SCH_OFFSET * channel,
1113 	    bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh,
1114 		IDEDMA_CMD + IDEDMA_SCH_OFFSET * channel) | IDEDMA_CMD_START);
1115 
1116 	sc->pciide_channels[channel].dma_in_progress = 1;
1117 }
1118 
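/*
 * Stop the bus-master engine, unload the data buffer's DMA map and
 * translate the IDEDMA status bits into WDC_DMAST_* flags for the
 * caller.
 */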
1119 int
1120 pciide_dma_finish(v, channel, drive)
1121 	void *v;
1122 	int channel, drive;
1123 {
1124 	struct pciide_softc *sc = v;
1125 	u_int8_t status;
1126 	int error = 0;
1127 	struct pciide_dma_maps *dma_maps =
1128 	    &sc->pciide_channels[channel].dma_maps[drive];
1129 
1130 	sc->pciide_channels[channel].dma_in_progress = 0;
1131 
1132 	status = bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh,
1133 	    IDEDMA_CTL + IDEDMA_SCH_OFFSET * channel);
1134 	WDCDEBUG_PRINT(("pciide_dma_finish: status 0x%x\n", status),
1135 	    DEBUG_XFERS);
1136 
1137 	/* stop DMA channel */
1138 	bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
1139 	    IDEDMA_CMD + IDEDMA_SCH_OFFSET * channel,
1140 	    (dma_maps->dma_flags & WDC_DMA_READ) ?
1141 	    0x00 : IDEDMA_CMD_WRITE);
1142 
1143 	/* Unload the map of the data buffer */
1144 	bus_dmamap_sync(sc->sc_dmat, dma_maps->dmamap_xfer, 0,
1145 	    dma_maps->dmamap_xfer->dm_mapsize,
1146 	    (dma_maps->dma_flags & WDC_DMA_READ) ?
1147 	    BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE);
1148 	bus_dmamap_unload(sc->sc_dmat, dma_maps->dmamap_xfer);
1149 
1150 	/* Clear status bits */
1151 	bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
1152 	    IDEDMA_CTL + IDEDMA_SCH_OFFSET * channel,
1153 	    status);
1154 
1155 	if ((status & IDEDMA_CTL_ERR) != 0) {
1156 		printf("%s:%d:%d: bus-master DMA error: status=0x%x\n",
1157 		    sc->sc_wdcdev.sc_dev.dv_xname, channel, drive, status);
1158 		error |= WDC_DMAST_ERR;
1159 	}
1160 
1161 	if ((status & IDEDMA_CTL_INTR) == 0) {
1162 		printf("%s:%d:%d: bus-master DMA error: missing interrupt, "
1163 		    "status=0x%x\n", sc->sc_wdcdev.sc_dev.dv_xname, channel,
1164 		    drive, status);
1165 		error |= WDC_DMAST_NOIRQ;
1166 	}
1167 
1168 	if ((status & IDEDMA_CTL_ACT) != 0) {
1169 		/* data underrun, may be a valid condition for ATAPI */
1170 		error |= WDC_DMAST_UNDER;
1171 	}
1172 	return error;
1173 }
1174 
1175 void
1176 pciide_irqack(chp)
1177 	struct channel_softc *chp;
1178 {
1179 	struct pciide_channel *cp = (struct pciide_channel*)chp;
1180 	struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
1181 
1182 	/* clear status bits in IDE DMA registers */
1183 	bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
1184 	    IDEDMA_CTL + IDEDMA_SCH_OFFSET * chp->channel,
1185 	    bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh,
1186 		IDEDMA_CTL + IDEDMA_SCH_OFFSET * chp->channel));
1187 }
1187 }
1188 
1189 /* some common code used by several chip_map */
1190 int
1191 pciide_chansetup(sc, channel, interface)
1192 	struct pciide_softc *sc;
1193 	int channel;
1194 	pcireg_t interface;
1195 {
1196 	struct pciide_channel *cp = &sc->pciide_channels[channel];
1197 	sc->wdc_chanarray[channel] = &cp->wdc_channel;
1198 	cp->name = PCIIDE_CHANNEL_NAME(channel);
1199 	cp->wdc_channel.channel = channel;
1200 	cp->wdc_channel.wdc = &sc->sc_wdcdev;
1201 	cp->wdc_channel.ch_queue =
1202 	    malloc(sizeof(struct channel_queue), M_DEVBUF, M_NOWAIT);
1203 	if (cp->wdc_channel.ch_queue == NULL) {
1204 		printf("%s: %s "
1205 		    "cannot allocate memory for command queue\n",
1206 		    sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
1207 		return 0;
1208 	}
1209 	cp->hw_ok = 1;
1210 
1211 	return 1;
1212 }
1213 
1214 /* some common code used by several chip channel_map */
1215 void
1216 pciide_mapchan(pa, cp, interface, cmdsizep, ctlsizep, pci_intr)
1217 	struct pci_attach_args *pa;
1218 	struct pciide_channel *cp;
1219 	pcireg_t interface;
1220 	bus_size_t *cmdsizep, *ctlsizep;
1221 	int (*pci_intr) __P((void *));
1222 {
1223 	struct channel_softc *wdc_cp = &cp->wdc_channel;
1224 
1225 	if (interface & PCIIDE_INTERFACE_PCI(wdc_cp->channel))
1226 		cp->hw_ok = pciide_mapregs_native(pa, cp, cmdsizep, ctlsizep,
1227 		    pci_intr);
1228 	else
1229 		cp->hw_ok = pciide_mapregs_compat(pa, cp,
1230 		    wdc_cp->channel, cmdsizep, ctlsizep);
1231 	if (cp->hw_ok == 0)
1232 		return;
1233 	wdc_cp->data32iot = wdc_cp->cmd_iot;
1234 	wdc_cp->data32ioh = wdc_cp->cmd_ioh;
1235 	wdcattach(wdc_cp);
1236 }
1237 
1238 /*
1239  * Generic code to call to know if a channel can be disabled. Return 1
1240  * if channel can be disabled, 0 if not
1241  */
1242 int
1243 pciide_chan_candisable(cp)
1244 	struct pciide_channel *cp;
1245 {
1246 	struct channel_softc *wdc_cp = &cp->wdc_channel;
1247 
1248 	if ((wdc_cp->ch_drive[0].drive_flags & DRIVE) == 0 &&
1249 	    (wdc_cp->ch_drive[1].drive_flags & DRIVE) == 0) {
1250 		cp->hw_ok = 0;
1251 		return 1;
1252 	}
1253 	return 0;
1254 }
1255 
1256 /*
1257  * generic code to map the compat intr if hw_ok=1 and it is a compat channel.
1258  * Set hw_ok=0 on failure
1259  */
1260 void
1261 pciide_map_compat_intr(pa, cp, compatchan, interface)
1262 	struct pci_attach_args *pa;
1263 	struct pciide_channel *cp;
1264 	int compatchan, interface;
1265 {
1266 	struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
1267 	struct channel_softc *wdc_cp = &cp->wdc_channel;
1268 
1269 	if ((interface & PCIIDE_INTERFACE_PCI(wdc_cp->channel)) != 0)
1270 		return;
1271 
1272 	cp->compat = 1;
1273 	cp->ih = pciide_machdep_compat_intr_establish(&sc->sc_wdcdev.sc_dev,
1274 	    pa, compatchan, pciide_compat_intr, cp);
1275 	if (cp->ih == NULL) {
1276 		printf("%s: no compatibility interrupt for use by %s\n",
1277 		    sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
1278 		cp->hw_ok = 0;
1279 	}
1280 }
1281 
1282 /*
1283  * generic code to unmap the compat intr if it is a compat channel;
1284  * does nothing for native-PCI channels.
1285  */
1286 void
1287 pciide_unmap_compat_intr(pa, cp, compatchan, interface)
1288 	struct pci_attach_args *pa;
1289 	struct pciide_channel *cp;
1290 	int compatchan, interface;
1291 {
1292 	struct channel_softc *wdc_cp = &cp->wdc_channel;
1293 
1294 	if ((interface & PCIIDE_INTERFACE_PCI(wdc_cp->channel)) != 0)
1295 		return;
1296 
1297 	pciide_machdep_compat_intr_disestablish(pa->pa_pc, cp->ih);
1298 }
1299 
1300 void
1301 pciide_print_channels(nchannels, interface)
1302 	int nchannels;
1303 	pcireg_t interface;
1304 {
1305 	int i;
1306 
1307 	for (i = 0; i < nchannels; i++) {
1308 		printf(", %s %s to %s", PCIIDE_CHANNEL_NAME(i),
1309 		    (interface & PCIIDE_INTERFACE_SETTABLE(i)) ?
1310 		    "configured" : "wired",
1311 		    (interface & PCIIDE_INTERFACE_PCI(i)) ? "native-PCI" :
1312 		    "compatibility");
1313 	}
1314 
1315 	printf("\n");
1316 }
1317 
1318 void
1319 pciide_print_modes(cp)
1320 	struct pciide_channel *cp;
1321 {
1322 	wdc_print_current_modes(&cp->wdc_channel);
1323 }
1324 
1325 void
1326 default_chip_map(sc, pa)
1327 	struct pciide_softc *sc;
1328 	struct pci_attach_args *pa;
1329 {
1330 	struct pciide_channel *cp;
1331 	pcireg_t interface = PCI_INTERFACE(pa->pa_class);
1332 	pcireg_t csr;
1333 	int channel, drive;
1334 	struct ata_drive_datas *drvp;
1335 	u_int8_t idedma_ctl;
1336 	bus_size_t cmdsize, ctlsize;
1337 	char *failreason;
1338 
1339 	if (pciide_chipen(sc, pa) == 0)
1340 		return;
1341 
1342 	if (interface & PCIIDE_INTERFACE_BUS_MASTER_DMA) {
1343 		printf(": DMA");
1344 		if (sc->sc_pp == &default_product_desc &&
1345 		    (sc->sc_wdcdev.sc_dev.dv_cfdata->cf_flags &
1346 		    PCIIDE_OPTIONS_DMA) == 0) {
1347 			printf(" (unsupported)");
1348 			sc->sc_dma_ok = 0;
1349 		} else {
1350 			pciide_mapreg_dma(sc, pa);
1351 			if (sc->sc_dma_ok != 0)
1352 				printf(", (partial support)");
1353 		}
1354 	} else {
1355 		printf(": no DMA");
1356 		sc->sc_dma_ok = 0;
1357 	}
1358 	if (sc->sc_dma_ok) {
1359 		sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_IRQACK;
1360 		sc->sc_wdcdev.irqack = pciide_irqack;
1361 	}
1362 	sc->sc_wdcdev.PIO_cap = 0;
1363 	sc->sc_wdcdev.DMA_cap = 0;
1364 	sc->sc_wdcdev.channels = sc->wdc_chanarray;
1365 	sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
1366 	sc->sc_wdcdev.cap |= WDC_CAPABILITY_DATA16;
1367 
1368 	pciide_print_channels(sc->sc_wdcdev.nchannels, interface);
1369 
1370 	for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
1371 		cp = &sc->pciide_channels[channel];
1372 		if (pciide_chansetup(sc, channel, interface) == 0)
1373 			continue;
1374 		if (interface & PCIIDE_INTERFACE_PCI(channel)) {
1375 			cp->hw_ok = pciide_mapregs_native(pa, cp, &cmdsize,
1376 			    &ctlsize, pciide_pci_intr);
1377 		} else {
1378 			cp->hw_ok = pciide_mapregs_compat(pa, cp,
1379 			    channel, &cmdsize, &ctlsize);
1380 		}
1381 		if (cp->hw_ok == 0)
1382 			continue;
1383 		/*
1384 		 * Check to see if something appears to be there.
1385 		 */
1386 		failreason = NULL;
1387 		pciide_map_compat_intr(pa, cp, channel, interface);
1388 		if (cp->hw_ok == 0)
1389 			continue;
1390 		if (!wdcprobe(&cp->wdc_channel)) {
1391 			failreason = "not responding; disabled or no drives?";
1392 			goto next;
1393 		}
1394 		/*
1395 		 * Now, make sure it's actually attributable to this PCI IDE
1396 		 * channel by trying to access the channel again while the
1397 		 * PCI IDE controller's I/O space is disabled.  (If the
1398 		 * channel no longer appears to be there, it belongs to
1399 		 * this controller.)  YUCK!
1400 		 */
1401 		csr = pci_conf_read(sc->sc_pc, sc->sc_tag,
1402 	  	    PCI_COMMAND_STATUS_REG);
1403 		pci_conf_write(sc->sc_pc, sc->sc_tag, PCI_COMMAND_STATUS_REG,
1404 		    csr & ~PCI_COMMAND_IO_ENABLE);
1405 		if (wdcprobe(&cp->wdc_channel))
1406 			failreason = "other hardware responding at addresses";
1407 		pci_conf_write(sc->sc_pc, sc->sc_tag,
1408 		    PCI_COMMAND_STATUS_REG, csr);
1409 next:
1410 		if (failreason) {
1411 			printf("%s: %s ignored (%s)\n",
1412 			    sc->sc_wdcdev.sc_dev.dv_xname, cp->name,
1413 			    failreason);
1414 			cp->hw_ok = 0;
1415 			pciide_unmap_compat_intr(pa, cp, channel, interface);
1416 			bus_space_unmap(cp->wdc_channel.cmd_iot,
1417 			    cp->wdc_channel.cmd_ioh, cmdsize);
1418 			bus_space_unmap(cp->wdc_channel.ctl_iot,
1419 			    cp->wdc_channel.ctl_ioh, ctlsize);
1420 		}
1421 		if (cp->hw_ok) {
1422 			cp->wdc_channel.data32iot = cp->wdc_channel.cmd_iot;
1423 			cp->wdc_channel.data32ioh = cp->wdc_channel.cmd_ioh;
1424 			wdcattach(&cp->wdc_channel);
1425 		}
1426 	}
1427 
1428 	if (sc->sc_dma_ok == 0)
1429 		return;
1430 
1431 	/* Allocate DMA maps */
1432 	for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
1433 		idedma_ctl = 0;
1434 		cp = &sc->pciide_channels[channel];
1435 		for (drive = 0; drive < 2; drive++) {
1436 			drvp = &cp->wdc_channel.ch_drive[drive];
1437 			/* If no drive, skip */
1438 			if ((drvp->drive_flags & DRIVE) == 0)
1439 				continue;
1440 			if ((drvp->drive_flags & DRIVE_DMA) == 0)
1441 				continue;
1442 			if (pciide_dma_table_setup(sc, channel, drive) != 0) {
1443 				/* Abort DMA setup */
1444 				printf("%s:%d:%d: cannot allocate DMA maps, "
1445 				    "using PIO transfers\n",
1446 				    sc->sc_wdcdev.sc_dev.dv_xname,
1447 				    channel, drive);
1448 				drvp->drive_flags &= ~DRIVE_DMA;
				continue;
1449 			}
1450 			printf("%s:%d:%d: using DMA data transfers\n",
1451 			    sc->sc_wdcdev.sc_dev.dv_xname,
1452 			    channel, drive);
1453 			idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
1454 		}
1455 		if (idedma_ctl != 0) {
1456 			/* Add software bits in status register */
1457 			bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
1458 			    IDEDMA_CTL + (IDEDMA_SCH_OFFSET * channel),
1459 			    idedma_ctl);
1460 		}
1461 	}
1462 }
1463 
1464 void
1465 piix_chip_map(sc, pa)
1466 	struct pciide_softc *sc;
1467 	struct pci_attach_args *pa;
1468 {
1469 	struct pciide_channel *cp;
1470 	int channel;
1471 	u_int32_t idetim;
1472 	bus_size_t cmdsize, ctlsize;
1473 
1474 	pcireg_t interface = PCI_INTERFACE(pa->pa_class);
1475 
1476 	if (pciide_chipen(sc, pa) == 0)
1477 		return;
1478 
1479 	printf(": DMA");
1480 	pciide_mapreg_dma(sc, pa);
1481 	sc->sc_wdcdev.cap |= WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
1482 	    WDC_CAPABILITY_MODE;
1483 	if (sc->sc_dma_ok) {
1484 		sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_IRQACK;
1485 		sc->sc_wdcdev.irqack = pciide_irqack;
1486 		switch (sc->sc_pp->ide_product) {
1487 		case PCI_PRODUCT_INTEL_82371AB_IDE:
1488 		case PCI_PRODUCT_INTEL_82440MX_IDE:
1489 		case PCI_PRODUCT_INTEL_82801AA_IDE:
1490 		case PCI_PRODUCT_INTEL_82801AB_IDE:
1491 		case PCI_PRODUCT_INTEL_82801BAM_IDE:
1492 		case PCI_PRODUCT_INTEL_82801BA_IDE:
1493 			sc->sc_wdcdev.cap |= WDC_CAPABILITY_UDMA;
1494 			break;
1495 		}
1496 	}
1497 	sc->sc_wdcdev.PIO_cap = 4;
1498 	sc->sc_wdcdev.DMA_cap = 2;
1499 	switch (sc->sc_pp->ide_product) {
1500 	case PCI_PRODUCT_INTEL_82801AA_IDE:
1501 		sc->sc_wdcdev.UDMA_cap = 4;
1502 		break;
1503 	case PCI_PRODUCT_INTEL_82801BAM_IDE:
1504 	case PCI_PRODUCT_INTEL_82801BA_IDE:
1505 		sc->sc_wdcdev.UDMA_cap = 5;
1506 		break;
1507 	default:
1508 		sc->sc_wdcdev.UDMA_cap = 2;
1509 		break;
1510 	}
1511 	if (sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82371FB_IDE)
1512 		sc->sc_wdcdev.set_modes = piix_setup_channel;
1513 	else
1514 		sc->sc_wdcdev.set_modes = piix3_4_setup_channel;
1515 	sc->sc_wdcdev.channels = sc->wdc_chanarray;
1516 	sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
1517 
1518 	pciide_print_channels(sc->sc_wdcdev.nchannels, interface);
1519 
1520 	WDCDEBUG_PRINT(("piix_setup_chip: old idetim=0x%x",
1521 	    pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_IDETIM)),
1522 	    DEBUG_PROBE);
1523 	if (sc->sc_pp->ide_product != PCI_PRODUCT_INTEL_82371FB_IDE) {
1524 		WDCDEBUG_PRINT((", sidetim=0x%x",
1525 		    pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_SIDETIM)),
1526 		    DEBUG_PROBE);
1527 		if (sc->sc_wdcdev.cap & WDC_CAPABILITY_UDMA) {
1528 			WDCDEBUG_PRINT((", udmareg 0x%x",
1529 			    pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_UDMAREG)),
1530 			    DEBUG_PROBE);
1531 		}
1532 		if (sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801AA_IDE ||
1533 		    sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801AB_IDE ||
1534 		    sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801BAM_IDE ||
1535 		    sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801BA_IDE ) {
1536 			WDCDEBUG_PRINT((", IDE_CONTROL 0x%x",
1537 			    pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_CONFIG)),
1538 			    DEBUG_PROBE);
1539 		}
1540 
1541 	}
1542 	WDCDEBUG_PRINT(("\n"), DEBUG_PROBE);
1543 
1544 	for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
1545 		cp = &sc->pciide_channels[channel];
1546 		/* PIIX is compat-only */
1547 		if (pciide_chansetup(sc, channel, 0) == 0)
1548 			continue;
1549 		idetim = pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_IDETIM);
1550 		if ((PIIX_IDETIM_READ(idetim, channel) &
1551 		    PIIX_IDETIM_IDE) == 0) {
1552 			printf("%s: %s ignored (disabled)\n",
1553 			    sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
1554 			continue;
1555 		}
1556 		/* PIIX chips are compat-only pciide devices */
1557 		pciide_map_compat_intr(pa, cp, channel, 0);
1558 		if (cp->hw_ok == 0)
1559 			continue;
1560 		pciide_mapchan(pa, cp, 0, &cmdsize, &ctlsize, pciide_pci_intr);
1561 		if (cp->hw_ok == 0)
1562 			goto next;
1563 		if (pciide_chan_candisable(cp)) {
1564 			idetim = PIIX_IDETIM_CLEAR(idetim, PIIX_IDETIM_IDE,
1565 			    channel);
1566 			pci_conf_write(sc->sc_pc, sc->sc_tag, PIIX_IDETIM,
1567 			    idetim);
1568 		}
1569 		if (cp->hw_ok == 0)
1570 			goto next;
1571 		sc->sc_wdcdev.set_modes(&cp->wdc_channel);
1572 next:
1573 		if (cp->hw_ok == 0)
1574 			pciide_unmap_compat_intr(pa, cp, channel, 0);
1575 	}
1576 
1577 	WDCDEBUG_PRINT(("piix_setup_chip: idetim=0x%x",
1578 	    pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_IDETIM)),
1579 	    DEBUG_PROBE);
1580 	if (sc->sc_pp->ide_product != PCI_PRODUCT_INTEL_82371FB_IDE) {
1581 		WDCDEBUG_PRINT((", sidetim=0x%x",
1582 		    pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_SIDETIM)),
1583 		    DEBUG_PROBE);
1584 		if (sc->sc_wdcdev.cap & WDC_CAPABILITY_UDMA) {
1585 			WDCDEBUG_PRINT((", udmareg 0x%x",
1586 			    pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_UDMAREG)),
1587 			    DEBUG_PROBE);
1588 		}
1589 		if (sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801AA_IDE ||
1590 		    sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801AB_IDE ||
1591 		    sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801BAM_IDE ||
1592 		    sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801BA_IDE ) {
1593 			WDCDEBUG_PRINT((", IDE_CONTROL 0x%x",
1594 			    pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_CONFIG)),
1595 			    DEBUG_PROBE);
1596 		}
1597 	}
1598 	WDCDEBUG_PRINT(("\n"), DEBUG_PROBE);
1599 }
1600 
1601 void
1602 piix_setup_channel(chp)
1603 	struct channel_softc *chp;
1604 {
1605 	u_int8_t mode[2], drive;
1606 	u_int32_t oidetim, idetim, idedma_ctl;
1607 	struct pciide_channel *cp = (struct pciide_channel*)chp;
1608 	struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
1609 	struct ata_drive_datas *drvp = cp->wdc_channel.ch_drive;
1610 
1611 	oidetim = pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_IDETIM);
1612 	idetim = PIIX_IDETIM_CLEAR(oidetim, 0xffff, chp->channel);
1613 	idedma_ctl = 0;
1614 
1615 	/* set up new idetim: Enable IDE registers decode */
1616 	idetim = PIIX_IDETIM_SET(idetim, PIIX_IDETIM_IDE,
1617 	    chp->channel);
1618 
1619 	/* setup DMA */
1620 	pciide_channel_dma_setup(cp);
1621 
1622 	/*
1623 	 * Here we have to mess with the drives' modes: the PIIX can't have
1624 	 * different timings for master and slave drives.
1625 	 * We need to find the best combination.
1626 	 */
1627 
1628 	/* If both drives support DMA, take the lower mode */
1629 	if ((drvp[0].drive_flags & DRIVE_DMA) &&
1630 	    (drvp[1].drive_flags & DRIVE_DMA)) {
1631 		mode[0] = mode[1] =
1632 		    min(drvp[0].DMA_mode, drvp[1].DMA_mode);
1633 		drvp[0].DMA_mode = mode[0];
1634 		drvp[1].DMA_mode = mode[1];
1635 		goto ok;
1636 	}
1637 	/*
1638 	 * If only one drive supports DMA, use its mode, and
1639 	 * put the other one in PIO mode 0 if mode not compatible
1640 	 * put the other one in PIO mode 0 if the timings are not compatible.
1641 	if (drvp[0].drive_flags & DRIVE_DMA) {
1642 		mode[0] = drvp[0].DMA_mode;
1643 		mode[1] = drvp[1].PIO_mode;
1644 		if (piix_isp_pio[mode[1]] != piix_isp_dma[mode[0]] ||
1645 		    piix_rtc_pio[mode[1]] != piix_rtc_dma[mode[0]])
1646 			mode[1] = drvp[1].PIO_mode = 0;
1647 		goto ok;
1648 	}
1649 	if (drvp[1].drive_flags & DRIVE_DMA) {
1650 		mode[1] = drvp[1].DMA_mode;
1651 		mode[0] = drvp[0].PIO_mode;
1652 		if (piix_isp_pio[mode[0]] != piix_isp_dma[mode[1]] ||
1653 		    piix_rtc_pio[mode[0]] != piix_rtc_dma[mode[1]])
1654 			mode[0] = drvp[0].PIO_mode = 0;
1655 		goto ok;
1656 	}
1657 	/*
1658 	 * If neither drive uses DMA, take the lower mode, unless
1659 	 * one of them is PIO mode < 2.
1660 	 */
1661 	if (drvp[0].PIO_mode < 2) {
1662 		mode[0] = drvp[0].PIO_mode = 0;
1663 		mode[1] = drvp[1].PIO_mode;
1664 	} else if (drvp[1].PIO_mode < 2) {
1665 		mode[1] = drvp[1].PIO_mode = 0;
1666 		mode[0] = drvp[0].PIO_mode;
1667 	} else {
1668 		mode[0] = mode[1] =
1669 		    min(drvp[1].PIO_mode, drvp[0].PIO_mode);
1670 		drvp[0].PIO_mode = mode[0];
1671 		drvp[1].PIO_mode = mode[1];
1672 	}
1673 ok:	/* The modes are set up */
1674 	for (drive = 0; drive < 2; drive++) {
1675 		if (drvp[drive].drive_flags & DRIVE_DMA) {
1676 			idetim |= piix_setup_idetim_timings(
1677 			    mode[drive], 1, chp->channel);
1678 			goto end;
1679 		}
1680 	}
1681 	/* If we get here, neither drive uses DMA */
1682 	if (mode[0] >= 2)
1683 		idetim |= piix_setup_idetim_timings(
1684 		    mode[0], 0, chp->channel);
1685 	else
1686 		idetim |= piix_setup_idetim_timings(
1687 		    mode[1], 0, chp->channel);
1688 end:	/*
1689 	 * The timing mode is now set up in the controller. Enable
1690 	 * it per-drive.
1691 	 */
1692 	for (drive = 0; drive < 2; drive++) {
1693 		/* If no drive, skip */
1694 		if ((drvp[drive].drive_flags & DRIVE) == 0)
1695 			continue;
1696 		idetim |= piix_setup_idetim_drvs(&drvp[drive]);
1697 		if (drvp[drive].drive_flags & DRIVE_DMA)
1698 			idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
1699 	}
1700 	if (idedma_ctl != 0) {
1701 		/* Add software bits in status register */
1702 		bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
1703 		    IDEDMA_CTL + (IDEDMA_SCH_OFFSET * chp->channel),
1704 		    idedma_ctl);
1705 	}
1706 	pci_conf_write(sc->sc_pc, sc->sc_tag, PIIX_IDETIM, idetim);
1707 	pciide_print_modes(cp);
1708 }
1709 
1710 void
1711 piix3_4_setup_channel(chp)
1712 	struct channel_softc *chp;
1713 {
1714 	struct ata_drive_datas *drvp;
1715 	u_int32_t oidetim, idetim, sidetim, udmareg, ideconf, idedma_ctl;
1716 	struct pciide_channel *cp = (struct pciide_channel*)chp;
1717 	struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
1718 	int drive;
1719 	int channel = chp->channel;
1720 
1721 	oidetim = pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_IDETIM);
1722 	sidetim = pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_SIDETIM);
1723 	udmareg = pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_UDMAREG);
1724 	ideconf = pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_CONFIG);
1725 	idetim = PIIX_IDETIM_CLEAR(oidetim, 0xffff, channel);
1726 	sidetim &= ~(PIIX_SIDETIM_ISP_MASK(channel) |
1727 	    PIIX_SIDETIM_RTC_MASK(channel));
1728 
1729 	idedma_ctl = 0;
1730 	/* If channel disabled, no need to go further */
1731 	if ((PIIX_IDETIM_READ(oidetim, channel) & PIIX_IDETIM_IDE) == 0)
1732 		return;
1733 	/* set up new idetim: Enable IDE registers decode */
1734 	idetim = PIIX_IDETIM_SET(idetim, PIIX_IDETIM_IDE, channel);
1735 
1736 	/* setup DMA if needed */
1737 	pciide_channel_dma_setup(cp);
1738 
1739 	for (drive = 0; drive < 2; drive++) {
1740 		udmareg &= ~(PIIX_UDMACTL_DRV_EN(channel, drive) |
1741 		    PIIX_UDMATIM_SET(0x3, channel, drive));
1742 		drvp = &chp->ch_drive[drive];
1743 		/* If no drive, skip */
1744 		if ((drvp->drive_flags & DRIVE) == 0)
1745 			continue;
1746 		if (((drvp->drive_flags & DRIVE_DMA) == 0 &&
1747 		    (drvp->drive_flags & DRIVE_UDMA) == 0))
1748 			goto pio;
1749 
1750 		if (sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801AA_IDE ||
1751 		    sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801AB_IDE ||
1752 		    sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801BAM_IDE ||
1753 		    sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801BA_IDE ) {
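			/* enable the ping-pong buffer on these ICH chips */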
1754 			ideconf |= PIIX_CONFIG_PINGPONG;
1755 		}
1756 		if (sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801BAM_IDE ||
1757 		    sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801BA_IDE) {
1758 			/* setup Ultra/100 */
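			/*
			 * Cap at UDMA 2 when the PIIX_CONFIG_CR bit for this
			 * drive is clear (presumably no 80-conductor cable).
			 */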
1759 			if (drvp->UDMA_mode > 2 &&
1760 			    (ideconf & PIIX_CONFIG_CR(channel, drive)) == 0)
1761 				drvp->UDMA_mode = 2;
1762 			if (drvp->UDMA_mode > 4) {
1763 				ideconf |= PIIX_CONFIG_UDMA100(channel, drive);
1764 			} else {
1765 				ideconf &= ~PIIX_CONFIG_UDMA100(channel, drive);
1766 				if (drvp->UDMA_mode > 2) {
1767 					ideconf |= PIIX_CONFIG_UDMA66(channel,
1768 					    drive);
1769 				} else {
1770 					ideconf &= ~PIIX_CONFIG_UDMA66(channel,
1771 					    drive);
1772 				}
1773 			}
1774 		}
1775 		if (sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801AA_IDE) {
1776 			/* setup Ultra/66 */
1777 			if (drvp->UDMA_mode > 2 &&
1778 			    (ideconf & PIIX_CONFIG_CR(channel, drive)) == 0)
1779 				drvp->UDMA_mode = 2;
1780 			if (drvp->UDMA_mode > 2)
1781 				ideconf |= PIIX_CONFIG_UDMA66(channel, drive);
1782 			else
1783 				ideconf &= ~PIIX_CONFIG_UDMA66(channel, drive);
1784 		}
1785 
1786 		if ((chp->wdc->cap & WDC_CAPABILITY_UDMA) &&
1787 		    (drvp->drive_flags & DRIVE_UDMA)) {
1788 			/* use Ultra/DMA */
1789 			drvp->drive_flags &= ~DRIVE_DMA;
1790 			udmareg |= PIIX_UDMACTL_DRV_EN(channel, drive);
1791 			udmareg |= PIIX_UDMATIM_SET(
1792 			    piix4_sct_udma[drvp->UDMA_mode], channel, drive);
1793 		} else {
1794 			/* use Multiword DMA */
1795 			drvp->drive_flags &= ~DRIVE_UDMA;
1796 			if (drive == 0) {
1797 				idetim |= piix_setup_idetim_timings(
1798 				    drvp->DMA_mode, 1, channel);
1799 			} else {
1800 				sidetim |= piix_setup_sidetim_timings(
1801 					drvp->DMA_mode, 1, channel);
1802 				idetim = PIIX_IDETIM_SET(idetim,
1803 				    PIIX_IDETIM_SITRE, channel);
1804 			}
1805 		}
1806 		idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
1807 
1808 pio:		/* use PIO mode */
1809 		idetim |= piix_setup_idetim_drvs(drvp);
1810 		if (drive == 0) {
1811 			idetim |= piix_setup_idetim_timings(
1812 			    drvp->PIO_mode, 0, channel);
1813 		} else {
1814 			sidetim |= piix_setup_sidetim_timings(
1815 				drvp->PIO_mode, 0, channel);
1816 			idetim = PIIX_IDETIM_SET(idetim,
1817 			    PIIX_IDETIM_SITRE, channel);
1818 		}
1819 	}
1820 	if (idedma_ctl != 0) {
1821 		/* Add software bits in status register */
1822 		bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
1823 		    IDEDMA_CTL + (IDEDMA_SCH_OFFSET * channel),
1824 		    idedma_ctl);
1825 	}
1826 	pci_conf_write(sc->sc_pc, sc->sc_tag, PIIX_IDETIM, idetim);
1827 	pci_conf_write(sc->sc_pc, sc->sc_tag, PIIX_SIDETIM, sidetim);
1828 	pci_conf_write(sc->sc_pc, sc->sc_tag, PIIX_UDMAREG, udmareg);
1829 	pci_conf_write(sc->sc_pc, sc->sc_tag, PIIX_CONFIG, ideconf);
1830 	pciide_print_modes(cp);
1831 }
1832 
1833 
1834 /* setup ISP and RTC fields, based on mode */
1835 static u_int32_t
1836 piix_setup_idetim_timings(mode, dma, channel)
1837 	u_int8_t mode;
1838 	u_int8_t dma;
1839 	u_int8_t channel;
1840 {
1841 
1842 	if (dma)
1843 		return PIIX_IDETIM_SET(0,
1844 		    PIIX_IDETIM_ISP_SET(piix_isp_dma[mode]) |
1845 		    PIIX_IDETIM_RTC_SET(piix_rtc_dma[mode]),
1846 		    channel);
1847 	else
1848 		return PIIX_IDETIM_SET(0,
1849 		    PIIX_IDETIM_ISP_SET(piix_isp_pio[mode]) |
1850 		    PIIX_IDETIM_RTC_SET(piix_rtc_pio[mode]),
1851 		    channel);
1852 }
1853 
1854 /* setup DTE, PPE, IE and TIME field based on PIO mode */
1855 static u_int32_t
1856 piix_setup_idetim_drvs(drvp)
1857 	struct ata_drive_datas *drvp;
1858 {
1859 	u_int32_t ret = 0;
1860 	struct channel_softc *chp = drvp->chnl_softc;
1861 	u_int8_t channel = chp->channel;
1862 	u_int8_t drive = drvp->drive;
1863 
1864 	/*
1865 	 * If the drive is using UDMA, the timing setups are independent,
1866 	 * so just check DMA and PIO here.
1867 	 */
1868 	if (drvp->drive_flags & DRIVE_DMA) {
1869 		/* if mode = DMA mode 0, use compatible timings */
1870 		if ((drvp->drive_flags & DRIVE_DMA) &&
1871 		    drvp->DMA_mode == 0) {
1872 			drvp->PIO_mode = 0;
1873 			return ret;
1874 		}
1875 		ret = PIIX_IDETIM_SET(ret, PIIX_IDETIM_TIME(drive), channel);
1876 		/*
1877 		 * If PIO and DMA timings are the same, use fast timings
1878 		 * for PIO too, else use compat timings.
1879 		 */
1880 		if ((piix_isp_pio[drvp->PIO_mode] !=
1881 		    piix_isp_dma[drvp->DMA_mode]) ||
1882 		    (piix_rtc_pio[drvp->PIO_mode] !=
1883 		    piix_rtc_dma[drvp->DMA_mode]))
1884 			drvp->PIO_mode = 0;
1885 		/* if PIO mode <= 2, use compat timings for PIO */
1886 		if (drvp->PIO_mode <= 2) {
1887 			ret = PIIX_IDETIM_SET(ret, PIIX_IDETIM_DTE(drive),
1888 			    channel);
1889 			return ret;
1890 		}
1891 	}
1892 
1893 	/*
1894 	 * Now setup PIO modes. If mode < 2, use compat timings.
1895 	 * Else enable fast timings. Enable IORDY and prefetch/post
1896 	 * if PIO mode >= 3.
1897 	 */
1898 
1899 	if (drvp->PIO_mode < 2)
1900 		return ret;
1901 
1902 	ret = PIIX_IDETIM_SET(ret, PIIX_IDETIM_TIME(drive), channel);
1903 	if (drvp->PIO_mode >= 3) {
1904 		ret = PIIX_IDETIM_SET(ret, PIIX_IDETIM_IE(drive), channel);
1905 		ret = PIIX_IDETIM_SET(ret, PIIX_IDETIM_PPE(drive), channel);
1906 	}
1907 	return ret;
1908 }
1909 
1910 /* setup values in SIDETIM registers, based on mode */
1911 static u_int32_t
1912 piix_setup_sidetim_timings(mode, dma, channel)
1913 	u_int8_t mode;
1914 	u_int8_t dma;
1915 	u_int8_t channel;
1916 {
1917 	if (dma)
1918 		return PIIX_SIDETIM_ISP_SET(piix_isp_dma[mode], channel) |
1919 		    PIIX_SIDETIM_RTC_SET(piix_rtc_dma[mode], channel);
1920 	else
1921 		return PIIX_SIDETIM_ISP_SET(piix_isp_pio[mode], channel) |
1922 		    PIIX_SIDETIM_RTC_SET(piix_rtc_pio[mode], channel);
1923 }
1924 
1925 void
1926 amd756_chip_map(sc, pa)
1927 	struct pciide_softc *sc;
1928 	struct pci_attach_args *pa;
1929 {
1930 	struct pciide_channel *cp;
1931 	pcireg_t interface = PCI_INTERFACE(pa->pa_class);
1932 	int channel;
1933 	pcireg_t chanenable;
1934 	bus_size_t cmdsize, ctlsize;
1935 
1936 	if (pciide_chipen(sc, pa) == 0)
1937 		return;
1938 
1939 	printf(": DMA");
1940 	pciide_mapreg_dma(sc, pa);
1941 	sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
1942 	    WDC_CAPABILITY_MODE;
1943 	if (sc->sc_dma_ok) {
1944 		sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_UDMA;
1945 		sc->sc_wdcdev.cap |= WDC_CAPABILITY_IRQACK;
1946 		sc->sc_wdcdev.irqack = pciide_irqack;
1947 	}
1948 	sc->sc_wdcdev.PIO_cap = 4;
1949 	sc->sc_wdcdev.DMA_cap = 2;
1950 	switch (sc->sc_pp->ide_product) {
1951 	case PCI_PRODUCT_AMD_766_IDE:
1952 		sc->sc_wdcdev.UDMA_cap = 5;
1953 		break;
1954 	default:
1955 		sc->sc_wdcdev.UDMA_cap = 4;
1956 		break;
1957 	}
1958 	sc->sc_wdcdev.set_modes = amd756_setup_channel;
1959 	sc->sc_wdcdev.channels = sc->wdc_chanarray;
1960 	sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
1961 	chanenable = pci_conf_read(sc->sc_pc, sc->sc_tag, AMD756_CHANSTATUS_EN);
1962 
1963 	pciide_print_channels(sc->sc_wdcdev.nchannels, interface);
1964 
1965 	for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
1966 		cp = &sc->pciide_channels[channel];
1967 		if (pciide_chansetup(sc, channel, interface) == 0)
1968 			continue;
1969 
1970 		if ((chanenable & AMD756_CHAN_EN(channel)) == 0) {
1971 			printf("%s: %s ignored (disabled)\n",
1972 			    sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
1973 			continue;
1974 		}
1975 		pciide_map_compat_intr(pa, cp, channel, interface);
1976 		if (cp->hw_ok == 0)
1977 			continue;
1978 
1979 		pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize,
1980 		    pciide_pci_intr);
1981 
1982 		if (pciide_chan_candisable(cp)) {
1983 			chanenable &= ~AMD756_CHAN_EN(channel);
1984 		}
1985 		if (cp->hw_ok == 0) {
1986 			pciide_unmap_compat_intr(pa, cp, channel, interface);
1987 			continue;
1988 		}
1989 
1990 		amd756_setup_channel(&cp->wdc_channel);
1991 	}
1992 	pci_conf_write(sc->sc_pc, sc->sc_tag, AMD756_CHANSTATUS_EN,
1993 	    chanenable);
1994 	return;
1995 }
1996 
1997 void
1998 amd756_setup_channel(chp)
1999 	struct channel_softc *chp;
2000 {
2001 	u_int32_t udmatim_reg, datatim_reg;
2002 	u_int8_t idedma_ctl;
2003 	int mode, drive;
2004 	struct ata_drive_datas *drvp;
2005 	struct pciide_channel *cp = (struct pciide_channel*)chp;
2006 	struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
2007 #ifndef	PCIIDE_AMD756_ENABLEDMA
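	/* product and rev are only needed for the DMA workaround below */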
2008 	int product = PCI_PRODUCT(
2009 	    pci_conf_read(sc->sc_pc, sc->sc_tag, PCI_ID_REG));
2010 	int rev = PCI_REVISION(
2011 	    pci_conf_read(sc->sc_pc, sc->sc_tag, PCI_CLASS_REG));
2012 #endif
2013 
2014 	idedma_ctl = 0;
2015 	datatim_reg = pci_conf_read(sc->sc_pc, sc->sc_tag, AMD756_DATATIM);
2016 	udmatim_reg = pci_conf_read(sc->sc_pc, sc->sc_tag, AMD756_UDMA);
2017 	datatim_reg &= ~AMD756_DATATIM_MASK(chp->channel);
2018 	udmatim_reg &= ~AMD756_UDMA_MASK(chp->channel);
2019 
2020 	/* setup DMA if needed */
2021 	pciide_channel_dma_setup(cp);
2022 
2023 	for (drive = 0; drive < 2; drive++) {
2024 		drvp = &chp->ch_drive[drive];
2025 		/* If no drive, skip */
2026 		if ((drvp->drive_flags & DRIVE) == 0)
2027 			continue;
2028 		/* add timing values, setup DMA if needed */
2029 		if (((drvp->drive_flags & DRIVE_DMA) == 0 &&
2030 		    (drvp->drive_flags & DRIVE_UDMA) == 0)) {
2031 			mode = drvp->PIO_mode;
2032 			goto pio;
2033 		}
2034 		if ((chp->wdc->cap & WDC_CAPABILITY_UDMA) &&
2035 		    (drvp->drive_flags & DRIVE_UDMA)) {
2036 			/* use Ultra/DMA */
2037 			drvp->drive_flags &= ~DRIVE_DMA;
2038 			udmatim_reg |= AMD756_UDMA_EN(chp->channel, drive) |
2039 			    AMD756_UDMA_EN_MTH(chp->channel, drive) |
2040 			    AMD756_UDMA_TIME(chp->channel, drive,
2041 				amd756_udma_tim[drvp->UDMA_mode]);
2042 			/* can use PIO timings, MW DMA unused */
2043 			mode = drvp->PIO_mode;
2044 		} else {
2045 			/* use Multiword DMA, but only if revision is OK */
2046 			drvp->drive_flags &= ~DRIVE_UDMA;
2047 #ifndef PCIIDE_AMD756_ENABLEDMA
2048 			/*
2049 			 * The workaround doesn't seem to be necessary
2050 			 * with all drives, so it can be disabled by
2051 			 * defining PCIIDE_AMD756_ENABLEDMA. The chip bug
2052 			 * causes a hard hang if triggered.
2053 			 */
2054 			if (AMD756_CHIPREV_DISABLEDMA(product, rev)) {
2055 				printf("%s:%d:%d: multi-word DMA disabled due "
2056 				    "to chip revision\n",
2057 				    sc->sc_wdcdev.sc_dev.dv_xname,
2058 				    chp->channel, drive);
2059 				mode = drvp->PIO_mode;
2060 				drvp->drive_flags &= ~DRIVE_DMA;
2061 				goto pio;
2062 			}
2063 #endif
2064 			/* mode = min(pio, dma+2) */
2065 			if (drvp->PIO_mode <= (drvp->DMA_mode + 2))
2066 				mode = drvp->PIO_mode;
2067 			else
2068 				mode = drvp->DMA_mode + 2;
2069 		}
2070 		idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
2071 
2072 pio:		/* setup PIO mode */
2073 		if (mode <= 2) {
2074 			drvp->DMA_mode = 0;
2075 			drvp->PIO_mode = 0;
2076 			mode = 0;
2077 		} else {
2078 			drvp->PIO_mode = mode;
2079 			drvp->DMA_mode = mode - 2;
2080 		}
2081 		datatim_reg |=
2082 		    AMD756_DATATIM_PULSE(chp->channel, drive,
2083 			amd756_pio_set[mode]) |
2084 		    AMD756_DATATIM_RECOV(chp->channel, drive,
2085 			amd756_pio_rec[mode]);
2086 	}
2087 	if (idedma_ctl != 0) {
2088 		/* Add software bits in status register */
2089 		bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
2090 		    IDEDMA_CTL + (IDEDMA_SCH_OFFSET * chp->channel),
2091 		    idedma_ctl);
2092 	}
2093 	pciide_print_modes(cp);
2094 	pci_conf_write(sc->sc_pc, sc->sc_tag, AMD756_DATATIM, datatim_reg);
2095 	pci_conf_write(sc->sc_pc, sc->sc_tag, AMD756_UDMA, udmatim_reg);
2096 }
2097 
2098 void
2099 apollo_chip_map(sc, pa)
2100 	struct pciide_softc *sc;
2101 	struct pci_attach_args *pa;
2102 {
2103 	struct pciide_channel *cp;
2104 	pcireg_t interface = PCI_INTERFACE(pa->pa_class);
2105 	int channel;
2106 	u_int32_t ideconf;
2107 	bus_size_t cmdsize, ctlsize;
2108 	pcitag_t pcib_tag;
2109 	pcireg_t pcib_id, pcib_class;
2110 
2111 	if (pciide_chipen(sc, pa) == 0)
2112 		return;
2113 	pcib_tag = pci_make_tag(pa->pa_pc, pa->pa_bus, pa->pa_device, 0);
2114 
2115 	pcib_id = pci_conf_read(sc->sc_pc, pcib_tag, PCI_ID_REG);
2116 	pcib_class = pci_conf_read(sc->sc_pc, pcib_tag, PCI_CLASS_REG);
2117 
2118 	switch (PCI_PRODUCT(pcib_id)) {
2119 	case PCI_PRODUCT_VIATECH_VT82C586_ISA:
2120 		if (PCI_REVISION(pcib_class) >= 0x02) {
2121 			printf(": ATA33");
2122 			sc->sc_wdcdev.UDMA_cap = 2;
2123 		} else {
2124 			printf(": DMA");
2125 			sc->sc_wdcdev.UDMA_cap = 0;
2126 		}
2127 		break;
2128 	case PCI_PRODUCT_VIATECH_VT82C596A:
2129 		if (PCI_REVISION(pcib_class) >= 0x12) {
2130 			printf(": ATA66");
2131 			sc->sc_wdcdev.UDMA_cap = 4;
2132 		} else {
2133 			printf(": ATA33");
2134 			sc->sc_wdcdev.UDMA_cap = 2;
2135 		}
2136 		break;
2137 
2138 	case PCI_PRODUCT_VIATECH_VT82C686A_ISA:
2139 		if (PCI_REVISION(pcib_class) >= 0x40) {
2140 			printf(": ATA100");
2141 			sc->sc_wdcdev.UDMA_cap = 5;
2142 		} else {
2143 			printf(": ATA66");
2144 			sc->sc_wdcdev.UDMA_cap = 4;
2145 		}
2146 		break;
2147 	default:
2148 		printf(": DMA");
2149 		sc->sc_wdcdev.UDMA_cap = 0;
2150 		break;
2151 	}
2152 
2153 	pciide_mapreg_dma(sc, pa);
2154 	sc->sc_wdcdev.cap |= WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
2155 	    WDC_CAPABILITY_MODE;
2156 	if (sc->sc_dma_ok) {
2157 		sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_IRQACK;
2158 		sc->sc_wdcdev.irqack = pciide_irqack;
2159 		if (sc->sc_wdcdev.UDMA_cap > 0)
2160 			sc->sc_wdcdev.cap |= WDC_CAPABILITY_UDMA;
2161 	}
2162 	sc->sc_wdcdev.PIO_cap = 4;
2163 	sc->sc_wdcdev.DMA_cap = 2;
2164 	sc->sc_wdcdev.set_modes = apollo_setup_channel;
2165 	sc->sc_wdcdev.channels = sc->wdc_chanarray;
2166 	sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
2167 
2168 	pciide_print_channels(sc->sc_wdcdev.nchannels, interface);
2169 
2170 	WDCDEBUG_PRINT(("apollo_chip_map: old APO_IDECONF=0x%x, "
2171 	    "APO_CTLMISC=0x%x, APO_DATATIM=0x%x, APO_UDMA=0x%x\n",
2172 	    pci_conf_read(sc->sc_pc, sc->sc_tag, APO_IDECONF),
2173 	    pci_conf_read(sc->sc_pc, sc->sc_tag, APO_CTLMISC),
2174 	    pci_conf_read(sc->sc_pc, sc->sc_tag, APO_DATATIM),
2175 	    pci_conf_read(sc->sc_pc, sc->sc_tag, APO_UDMA)),
2176 	    DEBUG_PROBE);
2177 
2178 	for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
2179 		cp = &sc->pciide_channels[channel];
2180 		if (pciide_chansetup(sc, channel, interface) == 0)
2181 			continue;
2182 
2183 		ideconf = pci_conf_read(sc->sc_pc, sc->sc_tag, APO_IDECONF);
2184 		if ((ideconf & APO_IDECONF_EN(channel)) == 0) {
2185 			printf("%s: %s ignored (disabled)\n",
2186 			    sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
2187 			continue;
2188 		}
2189 		pciide_map_compat_intr(pa, cp, channel, interface);
2190 		if (cp->hw_ok == 0)
2191 			continue;
2192 
2193 		pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize,
2194 		    pciide_pci_intr);
2195 		if (cp->hw_ok == 0) {
2196 			goto next;
2197 		}
2198 		if (pciide_chan_candisable(cp)) {
2199 			ideconf &= ~APO_IDECONF_EN(channel);
2200 			pci_conf_write(sc->sc_pc, sc->sc_tag, APO_IDECONF,
2201 			    ideconf);
2202 		}
2203 
2204 		if (cp->hw_ok == 0)
2205 			goto next;
2206 		apollo_setup_channel(&sc->pciide_channels[channel].wdc_channel);
2207 next:
2208 		if (cp->hw_ok == 0)
2209 			pciide_unmap_compat_intr(pa, cp, channel, interface);
2210 	}
2211 	WDCDEBUG_PRINT(("apollo_chip_map: APO_DATATIM=0x%x, APO_UDMA=0x%x\n",
2212 	    pci_conf_read(sc->sc_pc, sc->sc_tag, APO_DATATIM),
2213 	    pci_conf_read(sc->sc_pc, sc->sc_tag, APO_UDMA)), DEBUG_PROBE);
2214 }
2215 
2216 void
2217 apollo_setup_channel(chp)
2218 	struct channel_softc *chp;
2219 {
2220 	u_int32_t udmatim_reg, datatim_reg;
2221 	u_int8_t idedma_ctl;
2222 	int mode, drive;
2223 	struct ata_drive_datas *drvp;
2224 	struct pciide_channel *cp = (struct pciide_channel*)chp;
2225 	struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
2226 
2227 	idedma_ctl = 0;
2228 	datatim_reg = pci_conf_read(sc->sc_pc, sc->sc_tag, APO_DATATIM);
2229 	udmatim_reg = pci_conf_read(sc->sc_pc, sc->sc_tag, APO_UDMA);
2230 	datatim_reg &= ~APO_DATATIM_MASK(chp->channel);
2231 	udmatim_reg &= ~APO_UDMA_MASK(chp->channel);
2232 
2233 	/* setup DMA if needed */
2234 	pciide_channel_dma_setup(cp);
2235 
2236 	/*
2237 	 * We can't mix Ultra/33 and Ultra/66 on the same channel, so
2238 	 * downgrade to Ultra/33 if needed
2239 	 */
2240 	if ((chp->ch_drive[0].drive_flags & DRIVE_UDMA) &&
2241 	    (chp->ch_drive[1].drive_flags & DRIVE_UDMA)) {
2242 		/* both drives UDMA */
2243 		if (chp->ch_drive[0].UDMA_mode > 2 &&
2244 		    chp->ch_drive[1].UDMA_mode <= 2) {
2245 			/* drive 0 Ultra/66, drive 1 Ultra/33 */
2246 			chp->ch_drive[0].UDMA_mode = 2;
2247 		} else if (chp->ch_drive[1].UDMA_mode > 2 &&
2248 		    chp->ch_drive[0].UDMA_mode <= 2) {
2249 			/* drive 1 Ultra/66, drive 0 Ultra/33 */
2250 			chp->ch_drive[1].UDMA_mode = 2;
2251 		}
2252 	}
2253 
2254 	for (drive = 0; drive < 2; drive++) {
2255 		drvp = &chp->ch_drive[drive];
2256 		/* If no drive, skip */
2257 		if ((drvp->drive_flags & DRIVE) == 0)
2258 			continue;
2259 		/* add timing values, setup DMA if needed */
2260 		if (((drvp->drive_flags & DRIVE_DMA) == 0 &&
2261 		    (drvp->drive_flags & DRIVE_UDMA) == 0)) {
2262 			mode = drvp->PIO_mode;
2263 			goto pio;
2264 		}
2265 		if ((chp->wdc->cap & WDC_CAPABILITY_UDMA) &&
2266 		    (drvp->drive_flags & DRIVE_UDMA)) {
2267 			/* use Ultra/DMA */
2268 			drvp->drive_flags &= ~DRIVE_DMA;
2269 			udmatim_reg |= APO_UDMA_EN(chp->channel, drive) |
2270 			    APO_UDMA_EN_MTH(chp->channel, drive);
2271 
2272 			if (sc->sc_wdcdev.UDMA_cap == 5) {
2273 				/* 686b */
2274 				udmatim_reg |= APO_UDMA_CLK66(chp->channel);
2275 				udmatim_reg |= APO_UDMA_TIME(chp->channel,
2276 				    drive, apollo_udma100_tim[drvp->UDMA_mode]);
2277 			} else if (sc->sc_wdcdev.UDMA_cap == 4) {
2278 				/* 596b or 686a */
2279 				udmatim_reg |= APO_UDMA_CLK66(chp->channel);
2280 				udmatim_reg |= APO_UDMA_TIME(chp->channel,
2281 				    drive, apollo_udma66_tim[drvp->UDMA_mode]);
2282 			} else {
2283 				/* 596a or 586b */
2284 				udmatim_reg |= APO_UDMA_TIME(chp->channel,
2285 				    drive, apollo_udma33_tim[drvp->UDMA_mode]);
2286 			}
2287 			/* can use PIO timings, MW DMA unused */
2288 			mode = drvp->PIO_mode;
2289 		} else {
2290 			/* use Multiword DMA */
2291 			drvp->drive_flags &= ~DRIVE_UDMA;
2292 			/* mode = min(pio, dma+2) */
2293 			if (drvp->PIO_mode <= (drvp->DMA_mode + 2))
2294 				mode = drvp->PIO_mode;
2295 			else
2296 				mode = drvp->DMA_mode + 2;
2297 		}
2298 		idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
2299 
2300 pio:		/* setup PIO mode */
2301 		if (mode <= 2) {
2302 			drvp->DMA_mode = 0;
2303 			drvp->PIO_mode = 0;
2304 			mode = 0;
2305 		} else {
2306 			drvp->PIO_mode = mode;
2307 			drvp->DMA_mode = mode - 2;
2308 		}
2309 		datatim_reg |=
2310 		    APO_DATATIM_PULSE(chp->channel, drive,
2311 			apollo_pio_set[mode]) |
2312 		    APO_DATATIM_RECOV(chp->channel, drive,
2313 			apollo_pio_rec[mode]);
2314 	}
2315 	if (idedma_ctl != 0) {
2316 		/* Add software bits in status register */
2317 		bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
2318 		    IDEDMA_CTL + (IDEDMA_SCH_OFFSET * chp->channel),
2319 		    idedma_ctl);
2320 	}
2321 	pciide_print_modes(cp);
2322 	pci_conf_write(sc->sc_pc, sc->sc_tag, APO_DATATIM, datatim_reg);
2323 	pci_conf_write(sc->sc_pc, sc->sc_tag, APO_UDMA, udmatim_reg);
2324 }
2325 
2326 void
2327 cmd_channel_map(pa, sc, channel)
2328 	struct pci_attach_args *pa;
2329 	struct pciide_softc *sc;
2330 	int channel;
2331 {
2332 	struct pciide_channel *cp = &sc->pciide_channels[channel];
2333 	bus_size_t cmdsize, ctlsize;
2334 	u_int8_t ctrl = pciide_pci_read(sc->sc_pc, sc->sc_tag, CMD_CTRL);
2335 	pcireg_t interface;
2336 
2337 	/*
2338 	 * The 0648/0649 can be told to identify as a RAID controller.
2339 	 * In this case, we have to fake the interface value.
2340 	 */
2341 	if (PCI_SUBCLASS(pa->pa_class) != PCI_SUBCLASS_MASS_STORAGE_IDE) {
2342 		interface = PCIIDE_INTERFACE_SETTABLE(0) |
2343 		    PCIIDE_INTERFACE_SETTABLE(1);
2344 		if (pciide_pci_read(pa->pa_pc, pa->pa_tag, CMD_CONF) &
2345 		    CMD_CONF_DSA1)
2346 			interface |= PCIIDE_INTERFACE_PCI(0) |
2347 			    PCIIDE_INTERFACE_PCI(1);
2348 	} else {
2349 		interface = PCI_INTERFACE(pa->pa_class);
2350 	}
2351 
2352 	sc->wdc_chanarray[channel] = &cp->wdc_channel;
2353 	cp->name = PCIIDE_CHANNEL_NAME(channel);
2354 	cp->wdc_channel.channel = channel;
2355 	cp->wdc_channel.wdc = &sc->sc_wdcdev;
2356 
2357 	if (channel > 0) {
2358 		cp->wdc_channel.ch_queue =
2359 		    sc->pciide_channels[0].wdc_channel.ch_queue;
2360 	} else {
2361 		cp->wdc_channel.ch_queue =
2362 		    malloc(sizeof(struct channel_queue), M_DEVBUF, M_NOWAIT);
2363 	}
2364 	if (cp->wdc_channel.ch_queue == NULL) {
2365 		printf(
2366 		    "%s: %s cannot allocate memory for command queue\n",
2367 		    sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
2368 		return;
2369 	}
2370 
2371 	/*
2372 	 * with a CMD PCI64x, if we get here, the first channel is enabled:
2373 	 * there's no way to disable the first channel without disabling
2374 	 * the whole device
2375 	 */
2376 	if (channel != 0 && (ctrl & CMD_CTRL_2PORT) == 0) {
2377 		printf("%s: %s ignored (disabled)\n",
2378 		    sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
2379 		return;
2380 	}
2381 	cp->hw_ok = 1;
2382 	pciide_map_compat_intr(pa, cp, channel, interface);
2383 	if (cp->hw_ok == 0)
2384 		return;
2385 	pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize, cmd_pci_intr);
2386 	if (cp->hw_ok == 0) {
2387 		pciide_unmap_compat_intr(pa, cp, channel, interface);
2388 		return;
2389 	}
2390 	if (channel == 1) {
2391 		if (pciide_chan_candisable(cp)) {
2392 			ctrl &= ~CMD_CTRL_2PORT;
2393 			pciide_pci_write(pa->pa_pc, pa->pa_tag,
2394 			    CMD_CTRL, ctrl);
2395 			pciide_unmap_compat_intr(pa, cp, channel, interface);
2396 		}
2397 	}
2398 }
2399 
2400 int
2401 cmd_pci_intr(arg)
2402 	void *arg;
2403 {
2404 	struct pciide_softc *sc = arg;
2405 	struct pciide_channel *cp;
2406 	struct channel_softc *wdc_cp;
2407 	int i, rv, crv;
2408 	u_int32_t priirq, secirq;
2409 
2410 	rv = 0;
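	/*
	 * The drive interrupt status bits are read from CMD_CONF for the
	 * primary channel and from CMD_ARTTIM23 for the secondary one.
	 */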
2411 	priirq = pciide_pci_read(sc->sc_pc, sc->sc_tag, CMD_CONF);
2412 	secirq = pciide_pci_read(sc->sc_pc, sc->sc_tag, CMD_ARTTIM23);
2413 	for (i = 0; i < sc->sc_wdcdev.nchannels; i++) {
2414 		cp = &sc->pciide_channels[i];
2415 		wdc_cp = &cp->wdc_channel;
2416 		/* If a compat channel, skip it. */
2417 		if (cp->compat)
2418 			continue;
2419 		if ((i == 0 && (priirq & CMD_CONF_DRV0_INTR)) ||
2420 		    (i == 1 && (secirq & CMD_ARTTIM23_IRQ))) {
2421 			crv = wdcintr(wdc_cp);
2422 			if (crv == 0) {
2423 #if 0
2424 				printf("%s:%d: bogus intr\n",
2425 				    sc->sc_wdcdev.sc_dev.dv_xname, i);
2426 #endif
2427 			} else
2428 				rv = 1;
2429 		}
2430 	}
2431 	return rv;
2432 }
2433 
2434 void
2435 cmd_chip_map(sc, pa)
2436 	struct pciide_softc *sc;
2437 	struct pci_attach_args *pa;
2438 {
2439 	int channel;
2440 	pcireg_t interface = PCI_INTERFACE(pa->pa_class);
2441 	/*
2442 	 * For a CMD PCI064x, the use of PCI_COMMAND_IO_ENABLE
2443 	 * and the base address registers can be disabled at the
2444 	 * hardware level. In this case, the device is wired
2445 	 * in compat mode and its first channel is always enabled,
2446 	 * but we can't rely on PCI_COMMAND_IO_ENABLE.
2447 	 * In fact, it seems that the first channel of the CMD PCI0640
2448 	 * can't be disabled.
2449 	 */
2450 
2451 #ifdef PCIIDE_CMD064x_DISABLE
2452 	if (pciide_chipen(sc, pa) == 0)
2453 		return;
2454 #endif
2455 
2456 	printf(": no DMA");
2457 	sc->sc_dma_ok = 0;
2458 
2459 	sc->sc_wdcdev.channels = sc->wdc_chanarray;
2460 	sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
2461 	sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16;
2462 
2463 	pciide_print_channels(sc->sc_wdcdev.nchannels, interface);
2464 
2465 	for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
2466 		cmd_channel_map(pa, sc, channel);
2467 	}
2468 }
2469 
2470 void
2471 cmd0643_9_chip_map(sc, pa)
2472 	struct pciide_softc *sc;
2473 	struct pci_attach_args *pa;
2474 {
2475 	struct pciide_channel *cp;
2476 	int channel;
2477 	int rev = PCI_REVISION(
2478 	    pci_conf_read(sc->sc_pc, sc->sc_tag, PCI_CLASS_REG));
2479 	pcireg_t interface;
2480 
2481 	/*
2482 	 * The 0648/0649 can be told to identify as a RAID controller.
2483 	 * In this case, we have to fake the interface value.
2484 	 */
2485 	if (PCI_SUBCLASS(pa->pa_class) != PCI_SUBCLASS_MASS_STORAGE_IDE) {
2486 		interface = PCIIDE_INTERFACE_SETTABLE(0) |
2487 		    PCIIDE_INTERFACE_SETTABLE(1);
2488 		if (pciide_pci_read(pa->pa_pc, pa->pa_tag, CMD_CONF) &
2489 		    CMD_CONF_DSA1)
2490 			interface |= PCIIDE_INTERFACE_PCI(0) |
2491 			    PCIIDE_INTERFACE_PCI(1);
2492 	} else {
2493 		interface = PCI_INTERFACE(pa->pa_class);
2494 	}
2495 
2496 	/*
2497 	 * For a CMD PCI064x, the use of PCI_COMMAND_IO_ENABLE
2498 	 * and the base address registers can be disabled at the
2499 	 * hardware level. In this case, the device is wired
2500 	 * in compat mode and its first channel is always enabled,
2501 	 * but we can't rely on PCI_COMMAND_IO_ENABLE.
2502 	 * In fact, it seems that the first channel of the CMD PCI0640
2503 	 * can't be disabled.
2504 	 */
2505 
2506 #ifdef PCIIDE_CMD064x_DISABLE
2507 	if (pciide_chipen(sc, pa) == 0)
2508 		return;
2509 #endif
2510 	printf(": DMA");
2511 	pciide_mapreg_dma(sc, pa);
2512 	sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
2513 	    WDC_CAPABILITY_MODE;
2514 	if (sc->sc_dma_ok) {
2515 		sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_IRQACK;
2516 		switch (sc->sc_pp->ide_product) {
2517 		case PCI_PRODUCT_CMDTECH_649:
2518 		case PCI_PRODUCT_CMDTECH_648:
2519 			sc->sc_wdcdev.cap |= WDC_CAPABILITY_UDMA;
2520 			sc->sc_wdcdev.UDMA_cap = 4;
2521 			sc->sc_wdcdev.irqack = cmd646_9_irqack;
2522 			break;
2523 		case PCI_PRODUCT_CMDTECH_646:
2524 			if (rev >= CMD0646U2_REV) {
2525 				sc->sc_wdcdev.cap |= WDC_CAPABILITY_UDMA;
2526 				sc->sc_wdcdev.UDMA_cap = 2;
2527 			} else if (rev >= CMD0646U_REV) {
2528 				/*
2529 				 * Linux's driver claims that the 646U is broken
2530 				 * with UDMA. Only enable it if we know what we're
2531 				 * doing
2532 				 */
2533 #ifdef PCIIDE_CMD0646U_ENABLEUDMA
2534 				sc->sc_wdcdev.cap |= WDC_CAPABILITY_UDMA;
2535 				sc->sc_wdcdev.UDMA_cap = 2;
2536 #endif
2537 				/* explicitly disable UDMA */
2538 				pciide_pci_write(sc->sc_pc, sc->sc_tag,
2539 				    CMD_UDMATIM(0), 0);
2540 				pciide_pci_write(sc->sc_pc, sc->sc_tag,
2541 				    CMD_UDMATIM(1), 0);
2542 			}
2543 			sc->sc_wdcdev.irqack = cmd646_9_irqack;
2544 			break;
2545 		default:
2546 			sc->sc_wdcdev.irqack = pciide_irqack;
2547 		}
2548 	}
2549 
2550 	sc->sc_wdcdev.channels = sc->wdc_chanarray;
2551 	sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
2552 	sc->sc_wdcdev.PIO_cap = 4;
2553 	sc->sc_wdcdev.DMA_cap = 2;
2554 	sc->sc_wdcdev.set_modes = cmd0643_9_setup_channel;
2555 
2556 	pciide_print_channels(sc->sc_wdcdev.nchannels, interface);
2557 
2558 	WDCDEBUG_PRINT(("cmd0643_9_chip_map: old timings reg 0x%x 0x%x\n",
2559 		pci_conf_read(sc->sc_pc, sc->sc_tag, 0x54),
2560 		pci_conf_read(sc->sc_pc, sc->sc_tag, 0x58)),
2561 		DEBUG_PROBE);
2562 	for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
2563 		cp = &sc->pciide_channels[channel];
2564 		cmd_channel_map(pa, sc, channel);
2565 		if (cp->hw_ok == 0)
2566 			continue;
2567 		cmd0643_9_setup_channel(&cp->wdc_channel);
2568 	}
2569 	/*
2570 	 * note - this also makes sure we clear the irq disable and reset
2571 	 * bits
2572 	 */
2573 	pciide_pci_write(sc->sc_pc, sc->sc_tag, CMD_DMA_MODE, CMD_DMA_MULTIPLE);
2574 	WDCDEBUG_PRINT(("cmd0643_9_chip_map: timings reg now 0x%x 0x%x\n",
2575 	    pci_conf_read(sc->sc_pc, sc->sc_tag, 0x54),
2576 	    pci_conf_read(sc->sc_pc, sc->sc_tag, 0x58)),
2577 	    DEBUG_PROBE);
2578 }
2579 
2580 void
2581 cmd0643_9_setup_channel(chp)
2582 	struct channel_softc *chp;
2583 {
2584 	struct ata_drive_datas *drvp;
2585 	u_int8_t tim;
2586 	u_int32_t idedma_ctl, udma_reg;
2587 	int drive;
2588 	struct pciide_channel *cp = (struct pciide_channel*)chp;
2589 	struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
2590 
2591 	idedma_ctl = 0;
2592 	/* setup DMA if needed */
2593 	pciide_channel_dma_setup(cp);
2594 
2595 	for (drive = 0; drive < 2; drive++) {
2596 		drvp = &chp->ch_drive[drive];
2597 		/* If no drive, skip */
2598 		if ((drvp->drive_flags & DRIVE) == 0)
2599 			continue;
2600 		/* add timing values, setup DMA if needed */
2601 		tim = cmd0643_9_data_tim_pio[drvp->PIO_mode];
2602 		if (drvp->drive_flags & (DRIVE_DMA | DRIVE_UDMA)) {
2603 			if (drvp->drive_flags & DRIVE_UDMA) {
2604 				/* UltraDMA on a 646U2, 0648 or 0649 */
2605 				drvp->drive_flags &= ~DRIVE_DMA;
2606 				udma_reg = pciide_pci_read(sc->sc_pc,
2607 				    sc->sc_tag, CMD_UDMATIM(chp->channel));
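				/*
				 * Cap at UDMA 2 when the CMD_BICSR_80 bit for
				 * this channel is clear (presumably no 80-wire
				 * cable).
				 */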
2608 				if (drvp->UDMA_mode > 2 &&
2609 				    (pciide_pci_read(sc->sc_pc, sc->sc_tag,
2610 				    CMD_BICSR) &
2611 				    CMD_BICSR_80(chp->channel)) == 0)
2612 					drvp->UDMA_mode = 2;
2613 				if (drvp->UDMA_mode > 2)
2614 					udma_reg &= ~CMD_UDMATIM_UDMA33(drive);
2615 				else if (sc->sc_wdcdev.UDMA_cap > 2)
2616 					udma_reg |= CMD_UDMATIM_UDMA33(drive);
2617 				udma_reg |= CMD_UDMATIM_UDMA(drive);
2618 				udma_reg &= ~(CMD_UDMATIM_TIM_MASK <<
2619 				    CMD_UDMATIM_TIM_OFF(drive));
2620 				udma_reg |=
2621 				    (cmd0646_9_tim_udma[drvp->UDMA_mode] <<
2622 				    CMD_UDMATIM_TIM_OFF(drive));
2623 				pciide_pci_write(sc->sc_pc, sc->sc_tag,
2624 				    CMD_UDMATIM(chp->channel), udma_reg);
2625 			} else {
2626 				/*
2627 				 * use Multiword DMA.
2628 				 * Timings will be used for both PIO and DMA,
2629 				 * so adjust DMA mode if needed
2630 				 * if we have a 0646U2/8/9, turn off UDMA
2631 				 */
2632 				if (sc->sc_wdcdev.cap & WDC_CAPABILITY_UDMA) {
2633 					udma_reg = pciide_pci_read(sc->sc_pc,
2634 					    sc->sc_tag,
2635 					    CMD_UDMATIM(chp->channel));
2636 					udma_reg &= ~CMD_UDMATIM_UDMA(drive);
2637 					pciide_pci_write(sc->sc_pc, sc->sc_tag,
2638 					    CMD_UDMATIM(chp->channel),
2639 					    udma_reg);
2640 				}
2641 				if (drvp->PIO_mode >= 3 &&
2642 				    (drvp->DMA_mode + 2) > drvp->PIO_mode) {
2643 					drvp->DMA_mode = drvp->PIO_mode - 2;
2644 				}
2645 				tim = cmd0643_9_data_tim_dma[drvp->DMA_mode];
2646 			}
2647 			idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
2648 		}
2649 		pciide_pci_write(sc->sc_pc, sc->sc_tag,
2650 		    CMD_DATA_TIM(chp->channel, drive), tim);
2651 	}
2652 	if (idedma_ctl != 0) {
2653 		/* Add software bits in status register */
2654 		bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
2655 		    IDEDMA_CTL + (IDEDMA_SCH_OFFSET * chp->channel),
2656 		    idedma_ctl);
2657 	}
2658 	pciide_print_modes(cp);
2659 }
2660 
2661 void
2662 cmd646_9_irqack(chp)
2663 	struct channel_softc *chp;
2664 {
2665 	u_int32_t priirq, secirq;
2666 	struct pciide_channel *cp = (struct pciide_channel*)chp;
2667 	struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
2668 
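	/*
	 * Ack the channel interrupt by writing the register value back;
	 * the interrupt bits appear to be write-1-to-clear.
	 */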
2669 	if (chp->channel == 0) {
2670 		priirq = pciide_pci_read(sc->sc_pc, sc->sc_tag, CMD_CONF);
2671 		pciide_pci_write(sc->sc_pc, sc->sc_tag, CMD_CONF, priirq);
2672 	} else {
2673 		secirq = pciide_pci_read(sc->sc_pc, sc->sc_tag, CMD_ARTTIM23);
2674 		pciide_pci_write(sc->sc_pc, sc->sc_tag, CMD_ARTTIM23, secirq);
2675 	}
2676 	pciide_irqack(chp);
2677 }
2678 
2679 void
2680 cy693_chip_map(sc, pa)
2681 	struct pciide_softc *sc;
2682 	struct pci_attach_args *pa;
2683 {
2684 	struct pciide_channel *cp;
2685 	pcireg_t interface = PCI_INTERFACE(pa->pa_class);
2686 	bus_size_t cmdsize, ctlsize;
2687 
2688 	if (pciide_chipen(sc, pa) == 0)
2689 		return;
2690 	/*
2691 	 * this chip has 2 PCI IDE functions, one for primary and one for
2692 	 * secondary. So we need to call pciide_mapregs_compat() with
2693 	 * the real channel
2694 	 */
2695 	if (pa->pa_function == 1) {
2696 		sc->sc_cy_compatchan = 0;
2697 	} else if (pa->pa_function == 2) {
2698 		sc->sc_cy_compatchan = 1;
2699 	} else {
2700 		printf(": unexpected PCI function %d\n", pa->pa_function);
2701 		return;
2702 	}
2703 
2704 	if (interface & PCIIDE_INTERFACE_BUS_MASTER_DMA) {
2705 		printf(": DMA");
2706 		pciide_mapreg_dma(sc, pa);
2707 	} else {
2708 		printf(": no DMA");
2709 		sc->sc_dma_ok = 0;
2710 	}
2711 
2712 	sc->sc_cy_handle = cy82c693_init(pa->pa_iot);
2713 	if (sc->sc_cy_handle == NULL) {
2714 		printf(", (unable to map ctl registers)");
2715 		sc->sc_dma_ok = 0;
2716 	}
2717 
2718 	sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
2719 	    WDC_CAPABILITY_MODE;
2720 	if (sc->sc_dma_ok) {
2721 		sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_IRQACK;
2722 		sc->sc_wdcdev.irqack = pciide_irqack;
2723 	}
2724 	sc->sc_wdcdev.PIO_cap = 4;
2725 	sc->sc_wdcdev.DMA_cap = 2;
2726 	sc->sc_wdcdev.set_modes = cy693_setup_channel;
2727 
2728 	sc->sc_wdcdev.channels = sc->wdc_chanarray;
2729 	sc->sc_wdcdev.nchannels = 1;
2730 
2731 	/* Only one channel for this chip; if we are here it's enabled */
2732 	cp = &sc->pciide_channels[0];
2733 	sc->wdc_chanarray[0] = &cp->wdc_channel;
2734 	cp->name = PCIIDE_CHANNEL_NAME(0);
2735 	cp->wdc_channel.channel = 0;
2736 	cp->wdc_channel.wdc = &sc->sc_wdcdev;
2737 	cp->wdc_channel.ch_queue =
2738 	    malloc(sizeof(struct channel_queue), M_DEVBUF, M_NOWAIT);
2739 	if (cp->wdc_channel.ch_queue == NULL) {
2740 		printf(": cannot allocate memory for command queue\n");
2741 		return;
2742 	}
2743 	printf(", %s %s to ", PCIIDE_CHANNEL_NAME(0),
2744 	    (interface & PCIIDE_INTERFACE_SETTABLE(0)) ?
2745 	    "configured" : "wired");
2746 	if (interface & PCIIDE_INTERFACE_PCI(0)) {
2747 		printf("native-PCI\n");
2748 		cp->hw_ok = pciide_mapregs_native(pa, cp, &cmdsize, &ctlsize,
2749 		    pciide_pci_intr);
2750 	} else {
2751 		printf("compatibility\n");
2752 		cp->hw_ok = pciide_mapregs_compat(pa, cp, sc->sc_cy_compatchan,
2753 		    &cmdsize, &ctlsize);
2754 	}
2755 
2756 	cp->wdc_channel.data32iot = cp->wdc_channel.cmd_iot;
2757 	cp->wdc_channel.data32ioh = cp->wdc_channel.cmd_ioh;
2758 	pciide_map_compat_intr(pa, cp, sc->sc_cy_compatchan, interface);
2759 	if (cp->hw_ok == 0)
2760 		return;
2761 	wdcattach(&cp->wdc_channel);
2762 	if (pciide_chan_candisable(cp)) {
2763 		pci_conf_write(sc->sc_pc, sc->sc_tag,
2764 		    PCI_COMMAND_STATUS_REG, 0);
2765 	}
2766 	if (cp->hw_ok == 0) {
2767 		pciide_unmap_compat_intr(pa, cp, sc->sc_cy_compatchan,
2768 		    interface);
2769 		return;
2770 	}
2771 
2772 	WDCDEBUG_PRINT(("cy693_chip_map: old timings reg 0x%x\n",
2773 	    pci_conf_read(sc->sc_pc, sc->sc_tag, CY_CMD_CTRL)), DEBUG_PROBE);
2774 	cy693_setup_channel(&cp->wdc_channel);
2775 	WDCDEBUG_PRINT(("cy693_chip_map: new timings reg 0x%x\n",
2776 	    pci_conf_read(sc->sc_pc, sc->sc_tag, CY_CMD_CTRL)), DEBUG_PROBE);
2777 }
2778 
2779 void
2780 cy693_setup_channel(chp)
2781 	struct channel_softc *chp;
2782 {
2783 	struct ata_drive_datas *drvp;
2784 	int drive;
2785 	u_int32_t cy_cmd_ctrl;
2786 	u_int32_t idedma_ctl;
2787 	struct pciide_channel *cp = (struct pciide_channel*)chp;
2788 	struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
2789 	int dma_mode = -1;
2790 
2791 	cy_cmd_ctrl = idedma_ctl = 0;
2792 
2793 	/* setup DMA if needed */
2794 	pciide_channel_dma_setup(cp);
2795 
2796 	for (drive = 0; drive < 2; drive++) {
2797 		drvp = &chp->ch_drive[drive];
2798 		/* If no drive, skip */
2799 		if ((drvp->drive_flags & DRIVE) == 0)
2800 			continue;
2801 		/* add timing values, setup DMA if needed */
2802 		if (drvp->drive_flags & DRIVE_DMA) {
2803 			idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
2804 			/* use Multiword DMA */
2805 			if (dma_mode == -1 || dma_mode > drvp->DMA_mode)
2806 				dma_mode = drvp->DMA_mode;
2807 		}
2808 		cy_cmd_ctrl |= (cy_pio_pulse[drvp->PIO_mode] <<
2809 		    CY_CMD_CTRL_IOW_PULSE_OFF(drive));
2810 		cy_cmd_ctrl |= (cy_pio_rec[drvp->PIO_mode] <<
2811 		    CY_CMD_CTRL_IOW_REC_OFF(drive));
2812 		cy_cmd_ctrl |= (cy_pio_pulse[drvp->PIO_mode] <<
2813 		    CY_CMD_CTRL_IOR_PULSE_OFF(drive));
2814 		cy_cmd_ctrl |= (cy_pio_rec[drvp->PIO_mode] <<
2815 		    CY_CMD_CTRL_IOR_REC_OFF(drive));
2816 	}
2817 	pci_conf_write(sc->sc_pc, sc->sc_tag, CY_CMD_CTRL, cy_cmd_ctrl);
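	/*
	 * Both drives share a single DMA timing setting, so the lowest
	 * mode selected above is applied to both.
	 */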
2818 	chp->ch_drive[0].DMA_mode = dma_mode;
2819 	chp->ch_drive[1].DMA_mode = dma_mode;
2820 
2821 	if (dma_mode == -1)
2822 		dma_mode = 0;
2823 
2824 	if (sc->sc_cy_handle != NULL) {
2825 		/* Note: `multiple' is implied. */
2826 		cy82c693_write(sc->sc_cy_handle,
2827 		    (sc->sc_cy_compatchan == 0) ?
2828 		    CY_DMA_IDX_PRIMARY : CY_DMA_IDX_SECONDARY, dma_mode);
2829 	}
2830 
2831 	pciide_print_modes(cp);
2832 
2833 	if (idedma_ctl != 0) {
2834 		/* Add software bits in status register */
2835 		bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
2836 		    IDEDMA_CTL, idedma_ctl);
2837 	}
2838 }
2839 
2840 void
2841 sis_chip_map(sc, pa)
2842 	struct pciide_softc *sc;
2843 	struct pci_attach_args *pa;
2844 {
2845 	struct pciide_channel *cp;
2846 	int channel;
2847 	u_int8_t sis_ctr0 = pciide_pci_read(sc->sc_pc, sc->sc_tag, SIS_CTRL0);
2848 	pcireg_t interface = PCI_INTERFACE(pa->pa_class);
2849 	pcireg_t rev = PCI_REVISION(pa->pa_class);
2850 	bus_size_t cmdsize, ctlsize;
2851 	pcitag_t pchb_tag;
2852 	pcireg_t pchb_id, pchb_class;
2853 
2854 	if (pciide_chipen(sc, pa) == 0)
2855 		return;
2856 
2857 	printf(": DMA");
2858 	pciide_mapreg_dma(sc, pa);
2859 
2860 	/* get a PCI tag for the host bridge (function 0 of the same device) */
2861 	pchb_tag = pci_make_tag(pa->pa_pc, pa->pa_bus, pa->pa_device, 0);
2862 	/* and read the ID and revision of the host bridge */
2863 	pchb_id = pci_conf_read(sc->sc_pc, pchb_tag, PCI_ID_REG);
2864 	pchb_class = pci_conf_read(sc->sc_pc, pchb_tag, PCI_CLASS_REG);
2865 
2866 	sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
2867 	    WDC_CAPABILITY_MODE;
2868 	if (sc->sc_dma_ok) {
2869 		sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_IRQACK;
2870 		sc->sc_wdcdev.irqack = pciide_irqack;
2871 		/*
2872 		 * Controllers with rev >= 0xd0 support at least UDMA 2.
2873 		 * Controllers attached to a rev 0x2 530 host-to-PCI bridge
2874 		 * have problems with UDMA.
2875 		 */
2876 		if (rev >= 0xd0 &&
2877 		    (PCI_PRODUCT(pchb_id) != PCI_PRODUCT_SIS_SiS530 ||
2878 		    PCI_REVISION(pchb_class) >= 0x03))
2879 			sc->sc_wdcdev.cap |= WDC_CAPABILITY_UDMA;
2880 	}
2881 
2882 	sc->sc_wdcdev.PIO_cap = 4;
2883 	sc->sc_wdcdev.DMA_cap = 2;
2884 	if (sc->sc_wdcdev.cap & WDC_CAPABILITY_UDMA)
2885 		sc->sc_wdcdev.UDMA_cap = 2;
2886 	sc->sc_wdcdev.set_modes = sis_setup_channel;
2887 
2888 	sc->sc_wdcdev.channels = sc->wdc_chanarray;
2889 	sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
2890 
2891 	pciide_print_channels(sc->sc_wdcdev.nchannels, interface);
2892 
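	/* set the timing-select and FIFO-size bits in SIS_MISC */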
2893 	pciide_pci_write(sc->sc_pc, sc->sc_tag, SIS_MISC,
2894 	    pciide_pci_read(sc->sc_pc, sc->sc_tag, SIS_MISC) |
2895 	    SIS_MISC_TIM_SEL | SIS_MISC_FIFO_SIZE);
2896 
2897 	for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
2898 		cp = &sc->pciide_channels[channel];
2899 		if (pciide_chansetup(sc, channel, interface) == 0)
2900 			continue;
2901 		if ((channel == 0 && (sis_ctr0 & SIS_CTRL0_CHAN0_EN) == 0) ||
2902 		    (channel == 1 && (sis_ctr0 & SIS_CTRL0_CHAN1_EN) == 0)) {
2903 			printf("%s: %s ignored (disabled)\n",
2904 			    sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
2905 			continue;
2906 		}
2907 		pciide_map_compat_intr(pa, cp, channel, interface);
2908 		if (cp->hw_ok == 0)
2909 			continue;
2910 		pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize,
2911 		    pciide_pci_intr);
2912 		if (cp->hw_ok == 0) {
2913 			pciide_unmap_compat_intr(pa, cp, channel, interface);
2914 			continue;
2915 		}
2916 		if (pciide_chan_candisable(cp)) {
2917 			if (channel == 0)
2918 				sis_ctr0 &= ~SIS_CTRL0_CHAN0_EN;
2919 			else
2920 				sis_ctr0 &= ~SIS_CTRL0_CHAN1_EN;
2921 			pciide_pci_write(sc->sc_pc, sc->sc_tag, SIS_CTRL0,
2922 			    sis_ctr0);
2923 		}
2924 		if (cp->hw_ok == 0) {
2925 			pciide_unmap_compat_intr(pa, cp, channel, interface);
2926 			continue;
2927 		}
2928 		sis_setup_channel(&cp->wdc_channel);
2929 	}
2930 }
2931 
2932 void
2933 sis_setup_channel(chp)
2934 	struct channel_softc *chp;
2935 {
2936 	struct ata_drive_datas *drvp;
2937 	int drive;
2938 	u_int32_t sis_tim;
2939 	u_int32_t idedma_ctl;
2940 	struct pciide_channel *cp = (struct pciide_channel*)chp;
2941 	struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
2942 
2943 	WDCDEBUG_PRINT(("sis_setup_channel: old timings reg for "
2944 	    "channel %d 0x%x\n", chp->channel,
2945 	    pci_conf_read(sc->sc_pc, sc->sc_tag, SIS_TIM(chp->channel))),
2946 	    DEBUG_PROBE);
2947 	sis_tim = 0;
2948 	idedma_ctl = 0;
2949 	/* setup DMA if needed */
2950 	pciide_channel_dma_setup(cp);
2951 
2952 	for (drive = 0; drive < 2; drive++) {
2953 		drvp = &chp->ch_drive[drive];
2954 		/* If no drive, skip */
2955 		if ((drvp->drive_flags & DRIVE) == 0)
2956 			continue;
2957 		/* add timing values, setup DMA if needed */
2958 		if ((drvp->drive_flags & DRIVE_DMA) == 0 &&
2959 		    (drvp->drive_flags & DRIVE_UDMA) == 0)
2960 			goto pio;
2961 
2962 		if (drvp->drive_flags & DRIVE_UDMA) {
2963 			/* use Ultra/DMA */
2964 			drvp->drive_flags &= ~DRIVE_DMA;
2965 			sis_tim |= sis_udma_tim[drvp->UDMA_mode] <<
2966 			    SIS_TIM_UDMA_TIME_OFF(drive);
2967 			sis_tim |= SIS_TIM_UDMA_EN(drive);
2968 		} else {
2969 			/*
2970 			 * use Multiword DMA
2971 			 * Timings will be used for both PIO and DMA,
2972 			 * so adjust DMA mode if needed
2973 			 */
2974 			if (drvp->PIO_mode > (drvp->DMA_mode + 2))
2975 				drvp->PIO_mode = drvp->DMA_mode + 2;
2976 			if (drvp->DMA_mode + 2 > (drvp->PIO_mode))
2977 				drvp->DMA_mode = (drvp->PIO_mode > 2) ?
2978 				    drvp->PIO_mode - 2 : 0;
2979 			if (drvp->DMA_mode == 0)
2980 				drvp->PIO_mode = 0;
2981 		}
2982 		idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
2983 pio:		sis_tim |= sis_pio_act[drvp->PIO_mode] <<
2984 		    SIS_TIM_ACT_OFF(drive);
2985 		sis_tim |= sis_pio_rec[drvp->PIO_mode] <<
2986 		    SIS_TIM_REC_OFF(drive);
2987 	}
2988 	WDCDEBUG_PRINT(("sis_setup_channel: new timings reg for "
2989 	    "channel %d 0x%x\n", chp->channel, sis_tim), DEBUG_PROBE);
2990 	pci_conf_write(sc->sc_pc, sc->sc_tag, SIS_TIM(chp->channel), sis_tim);
2991 	if (idedma_ctl != 0) {
2992 		/* Add software bits in status register */
2993 		bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
2994 		    IDEDMA_CTL, idedma_ctl);
2995 	}
2996 	pciide_print_modes(cp);
2997 }
2998 
2999 void
3000 natsemi_chip_map(sc, pa)
3001 	struct pciide_softc *sc;
3002 	struct pci_attach_args *pa;
3003 {
3004 	struct pciide_channel *cp;
3005 	int channel;
3006 	pcireg_t interface, ctl;
3007 	bus_size_t cmdsize, ctlsize;
3008 
3009 	if (pciide_chipen(sc, pa) == 0)
3010 		return;
3011 
3012 	printf(": DMA");
3013 	pciide_mapreg_dma(sc, pa);
3014 	sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16;
3015 
3016 	if (sc->sc_dma_ok) {
3017 		sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_IRQACK;
3018 		sc->sc_wdcdev.irqack = pciide_irqack;
3019 	}
3020 
3021 	pciide_pci_write(sc->sc_pc, sc->sc_tag, NATSEMI_CCBT, 0xb7);
3022 
3023 	/*
3024 	 * Mask off interrupts from both channels, appropriate channel(s)
3025 	 * will be unmasked later.
3026 	 */
3027 	pciide_pci_write(sc->sc_pc, sc->sc_tag, NATSEMI_CTRL2,
3028 	    pciide_pci_read(sc->sc_pc, sc->sc_tag, NATSEMI_CTRL2) |
3029 	    NATSEMI_CHMASK(0) | NATSEMI_CHMASK(1));
3030 
3031 	sc->sc_wdcdev.PIO_cap = 4;
3032 	sc->sc_wdcdev.DMA_cap = 2;
3033 	sc->sc_wdcdev.set_modes = natsemi_setup_channel;
3034 	sc->sc_wdcdev.channels = sc->wdc_chanarray;
3035 	sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
3036 
3037 	interface = PCI_INTERFACE(pci_conf_read(sc->sc_pc, sc->sc_tag,
3038 	    PCI_CLASS_REG));
3039 	interface &= ~PCIIDE_CHANSTATUS_EN;	/* Reserved on PC87415 */
3040 	pciide_print_channels(sc->sc_wdcdev.nchannels, interface);
3041 
3042 	/* If we're in PCIIDE mode, unmask INTA, otherwise mask it. */
3043 	ctl = pciide_pci_read(sc->sc_pc, sc->sc_tag, NATSEMI_CTRL1);
3044 	if (interface & (PCIIDE_INTERFACE_PCI(0) | PCIIDE_INTERFACE_PCI(1)))
3045 		ctl &= ~NATSEMI_CTRL1_INTAMASK;
3046 	else
3047 		ctl |= NATSEMI_CTRL1_INTAMASK;
3048 	pciide_pci_write(sc->sc_pc, sc->sc_tag, NATSEMI_CTRL1, ctl);
3049 
3050 	for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
3051 		cp = &sc->pciide_channels[channel];
3052 		if (pciide_chansetup(sc, channel, interface) == 0)
3053 			continue;
3054 
3055 		pciide_map_compat_intr(pa, cp, channel, interface);
3056 		if (cp->hw_ok == 0)
3057 			continue;
3058 
3059 		pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize,
3060 		    natsemi_pci_intr);
3061 		if (cp->hw_ok == 0) {
3062 			pciide_unmap_compat_intr(pa, cp, channel, interface);
3063 			continue;
3064 		}
3065 		natsemi_setup_channel(&cp->wdc_channel);
3066 	}
3067 
3068 }
3069 
3070 void
3071 natsemi_setup_channel(chp)
3072 	struct channel_softc *chp;
3073 {
3074 	struct ata_drive_datas *drvp;
3075 	int drive, ndrives = 0;
3076 	u_int32_t idedma_ctl = 0;
3077 	struct pciide_channel *cp = (struct pciide_channel*)chp;
3078 	struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
3079 
3080 	/* setup DMA if needed */
3081 	pciide_channel_dma_setup(cp);
3082 
3083 	for (drive = 0; drive < 2; drive++) {
3084 		drvp = &chp->ch_drive[drive];
3085 		/* If no drive, skip */
3086 		if ((drvp->drive_flags & DRIVE) == 0)
3087 			continue;
3088 
3089 		ndrives++;
3090 		/* add timing values, setup DMA if needed */
3091 		if ((drvp->drive_flags & DRIVE_DMA) == 0)
3092 			goto pio;
3093 
3094 		/*
3095 		 * use Multiword DMA
3096 		 * Timings will be used for both PIO and DMA,
3097 		 * so adjust DMA mode if needed
3098 		 */
3099 		if (drvp->PIO_mode >= 3 &&
3100 		    (drvp->DMA_mode + 2) > drvp->PIO_mode) {
3101 			drvp->DMA_mode = drvp->PIO_mode - 2;
3102 		}
3103 		idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
3104 pio:
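		/*
		 * A single fixed timing value (0x85) is programmed into both
		 * the read and write timing registers of each drive.
		 */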
3105 		pciide_pci_write(sc->sc_pc, sc->sc_tag,
3106 		    NATSEMI_RTREG(chp->channel, drive), 0x85);
3107 		pciide_pci_write(sc->sc_pc, sc->sc_tag,
3108 		    NATSEMI_WTREG(chp->channel, drive), 0x85);
3109 	}
3110 	if (idedma_ctl != 0) {
3111 		/* Add software bits in status register */
3112 		bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
3113 		    IDEDMA_CTL, idedma_ctl);
3114 	}
3115 	if (ndrives > 0) {
3116 		/* Unmask the channel if at least one drive is found */
3117 		pciide_pci_write(sc->sc_pc, sc->sc_tag, NATSEMI_CTRL2,
3118 		    pciide_pci_read(sc->sc_pc, sc->sc_tag, NATSEMI_CTRL2) &
3119 		    ~(NATSEMI_CHMASK(chp->channel)));
3120 	}
3121 	pciide_print_modes(cp);
3122 
3123 	/* Go ahead and ack interrupts generated during probe. */
3124 	bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
3125 	    (chp->channel * IDEDMA_SCH_OFFSET) + IDEDMA_CTL,
3126 	    bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh,
3127 		(chp->channel * IDEDMA_SCH_OFFSET) + IDEDMA_CTL));
3128 }
3129 
3130 int
3131 natsemi_pci_intr(arg)
3132 	void *arg;
3133 {
3134 	struct pciide_softc *sc = arg;
3135 	struct pciide_channel *cp;
3136 	struct channel_softc *wdc_cp;
3137 	int i, rv, crv;
3138 	u_int8_t ide_dmactl, msk;
3139 
3140 	rv = 0;
3141 	for (i = 0; i < sc->sc_wdcdev.nchannels; i++) {
3142 		cp = &sc->pciide_channels[i];
3143 		wdc_cp = &cp->wdc_channel;
3144 		/* If a compat channel, skip it. */
3145 		if (cp->compat)
3146 			continue;
3147 
3148 		/* If this channel is masked, skip it. */
3149 		msk = pciide_pci_read(sc->sc_pc, sc->sc_tag, NATSEMI_CTRL2);
3150 		if (msk & NATSEMI_CHMASK(i))
3151 			continue;
3152 
3153 		/* Get intr status */
3154 		ide_dmactl = bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh,
3155 		    (i * IDEDMA_SCH_OFFSET) + IDEDMA_CTL);
3156 		if (ide_dmactl & IDEDMA_CTL_ERR)
3157 			printf("%s:%d: error intr\n",
3158 			    sc->sc_wdcdev.sc_dev.dv_xname, i);
3159 		if (ide_dmactl & IDEDMA_CTL_INTR) {
3160 			crv = wdcintr(wdc_cp);
3161 			if (crv == 0)
3162 				printf("%s:%d: bogus intr\n",
3163 				    sc->sc_wdcdev.sc_dev.dv_xname, i);
3164 			else
3165 				rv = 1;
3166 		}
3167 	}
3168 	return rv;
3169 }
3170 
3171 void
3172 acer_chip_map(sc, pa)
3173 	struct pciide_softc *sc;
3174 	struct pci_attach_args *pa;
3175 {
3176 	struct pciide_channel *cp;
3177 	int channel;
3178 	pcireg_t cr, interface;
3179 	bus_size_t cmdsize, ctlsize;
3180 	pcireg_t rev = PCI_REVISION(pa->pa_class);
3181 
3182 	if (pciide_chipen(sc, pa) == 0)
3183 		return;
3184 
3185 	printf(": DMA");
3186 	pciide_mapreg_dma(sc, pa);
3187 	sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
3188 	    WDC_CAPABILITY_MODE;
3189 
3190 	if (rev < 0xC4)
3191 		sc->sc_wdcdev.cap |= WDC_CAPABILITY_NO_ATAPI_DMA;
3192 
3193 	if (sc->sc_dma_ok) {
3194 		sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA;
3195 		if (rev >= 0x20) {
3196 			sc->sc_wdcdev.cap |= WDC_CAPABILITY_UDMA;
3197 			if (rev >= 0xC4)
3198 				sc->sc_wdcdev.UDMA_cap = 5;
3199 			else if (rev >= 0xC2)
3200 				sc->sc_wdcdev.UDMA_cap = 4;
3201 			else
3202 				sc->sc_wdcdev.UDMA_cap = 2;
3203 		}
3204 		sc->sc_wdcdev.cap |= WDC_CAPABILITY_IRQACK;
3205 		sc->sc_wdcdev.irqack = pciide_irqack;
3206 	}
3207 
3208 	sc->sc_wdcdev.PIO_cap = 4;
3209 	sc->sc_wdcdev.DMA_cap = 2;
3210 	sc->sc_wdcdev.set_modes = acer_setup_channel;
3211 	sc->sc_wdcdev.channels = sc->wdc_chanarray;
3212 	sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
3213 
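	/* enable DMA and clear the FIFO-disable bit in ACER_CDRC */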
3214 	pciide_pci_write(sc->sc_pc, sc->sc_tag, ACER_CDRC,
3215 	    (pciide_pci_read(sc->sc_pc, sc->sc_tag, ACER_CDRC) |
3216 		ACER_CDRC_DMA_EN) & ~ACER_CDRC_FIFO_DISABLE);
3217 
3218 	/* Enable "microsoft register bits" R/W. */
3219 	pciide_pci_write(sc->sc_pc, sc->sc_tag, ACER_CCAR3,
3220 	    pciide_pci_read(sc->sc_pc, sc->sc_tag, ACER_CCAR3) | ACER_CCAR3_PI);
3221 	pciide_pci_write(sc->sc_pc, sc->sc_tag, ACER_CCAR1,
3222 	    pciide_pci_read(sc->sc_pc, sc->sc_tag, ACER_CCAR1) &
3223 	    ~(ACER_CHANSTATUS_RO|PCIIDE_CHAN_RO(0)|PCIIDE_CHAN_RO(1)));
3224 	pciide_pci_write(sc->sc_pc, sc->sc_tag, ACER_CCAR2,
3225 	    pciide_pci_read(sc->sc_pc, sc->sc_tag, ACER_CCAR2) &
3226 	    ~ACER_CHANSTATUSREGS_RO);
3227 	cr = pci_conf_read(sc->sc_pc, sc->sc_tag, PCI_CLASS_REG);
3228 	cr |= (PCIIDE_CHANSTATUS_EN << PCI_INTERFACE_SHIFT);
3229 	pci_conf_write(sc->sc_pc, sc->sc_tag, PCI_CLASS_REG, cr);
3230 	/* Don't use cr, re-read the real register content instead */
3231 	interface = PCI_INTERFACE(pci_conf_read(sc->sc_pc, sc->sc_tag,
3232 	    PCI_CLASS_REG));
3233 
3234 	pciide_print_channels(sc->sc_wdcdev.nchannels, interface);
3235 
3236 	/* From linux: enable "Cable Detection" */
3237 	if (rev >= 0xC2) {
3238 		pciide_pci_write(sc->sc_pc, sc->sc_tag, ACER_0x4B,
3239 		    pciide_pci_read(sc->sc_pc, sc->sc_tag, ACER_0x4B)
3240 		    | ACER_0x4B_CDETECT);
3241 		/* set south-bridge's enable bit, m1533, 0x79 */
3242 		if (rev == 0xC2)
3243 			/* 1543C-B0 (m1533, 0x79, bit 2) */
3244 			pciide_pci_write(sc->sc_pc, sc->sc_tag, ACER_0x79,
3245 			    pciide_pci_read(sc->sc_pc, sc->sc_tag, ACER_0x79)
3246 			    | ACER_0x79_REVC2_EN);
3247 		else
3248 			/* 1553/1535 (m1533, 0x79, bit 1) */
3249 			pciide_pci_write(sc->sc_pc, sc->sc_tag, ACER_0x79,
3250 			    pciide_pci_read(sc->sc_pc, sc->sc_tag, ACER_0x79)
3251 			    | ACER_0x79_EN);
3252 	}
3253 
3254 	for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
3255 		cp = &sc->pciide_channels[channel];
3256 		if (pciide_chansetup(sc, channel, interface) == 0)
3257 			continue;
3258 		if ((interface & PCIIDE_CHAN_EN(channel)) == 0) {
3259 			printf("%s: %s ignored (disabled)\n",
3260 			    sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
3261 			continue;
3262 		}
3263 		pciide_map_compat_intr(pa, cp, channel, interface);
3264 		if (cp->hw_ok == 0)
3265 			continue;
3266 		pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize,
3267 		    (rev >= 0xC2) ? pciide_pci_intr : acer_pci_intr);
3268 		if (cp->hw_ok == 0) {
3269 			pciide_unmap_compat_intr(pa, cp, channel, interface);
3270 			continue;
3271 		}
3272 		if (pciide_chan_candisable(cp)) {
3273 			cr &= ~(PCIIDE_CHAN_EN(channel) << PCI_INTERFACE_SHIFT);
3274 			pci_conf_write(sc->sc_pc, sc->sc_tag,
3275 			    PCI_CLASS_REG, cr);
3276 		}
3277 		if (cp->hw_ok == 0) {
3278 			pciide_unmap_compat_intr(pa, cp, channel, interface);
3279 			continue;
3280 		}
3281 		acer_setup_channel(&cp->wdc_channel);
3282 	}
3283 }
3284 
3285 void
3286 acer_setup_channel(chp)
3287 	struct channel_softc *chp;
3288 {
3289 	struct ata_drive_datas *drvp;
3290 	int drive;
3291 	u_int32_t acer_fifo_udma;
3292 	u_int32_t idedma_ctl;
3293 	struct pciide_channel *cp = (struct pciide_channel*)chp;
3294 	struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
3295 
3296 	idedma_ctl = 0;
3297 	acer_fifo_udma = pci_conf_read(sc->sc_pc, sc->sc_tag, ACER_FTH_UDMA);
3298 	WDCDEBUG_PRINT(("acer_setup_channel: old fifo/udma reg 0x%x\n",
3299 	    acer_fifo_udma), DEBUG_PROBE);
3300 	/* setup DMA if needed */
3301 	pciide_channel_dma_setup(cp);
3302 
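	/*
	 * If either drive wants Ultra/DMA, look at the cable-detect bit for
	 * this channel: when it is set (presumably no 80-wire cable), both
	 * drives are clamped to UDMA mode 2 (Ultra/33).
	 */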
3303 	if ((chp->ch_drive[0].drive_flags | chp->ch_drive[1].drive_flags) &
3304 	    DRIVE_UDMA)	{	/* check 80 pins cable */
3305 		if (pciide_pci_read(sc->sc_pc, sc->sc_tag, ACER_0x4A) &
3306 		    ACER_0x4A_80PIN(chp->channel)) {
3307 			if (chp->ch_drive[0].UDMA_mode > 2)
3308 				chp->ch_drive[0].UDMA_mode = 2;
3309 			if (chp->ch_drive[1].UDMA_mode > 2)
3310 				chp->ch_drive[1].UDMA_mode = 2;
3311 		}
3312 	}
3313 
3314 	for (drive = 0; drive < 2; drive++) {
3315 		drvp = &chp->ch_drive[drive];
3316 		/* If no drive, skip */
3317 		if ((drvp->drive_flags & DRIVE) == 0)
3318 			continue;
3319 		WDCDEBUG_PRINT(("acer_setup_channel: old timings reg for "
3320 		    "channel %d drive %d 0x%x\n", chp->channel, drive,
3321 		    pciide_pci_read(sc->sc_pc, sc->sc_tag,
3322 		    ACER_IDETIM(chp->channel, drive))), DEBUG_PROBE);
3323 		/* clear FIFO/DMA mode */
3324 		acer_fifo_udma &= ~(ACER_FTH_OPL(chp->channel, drive, 0x3) |
3325 		    ACER_UDMA_EN(chp->channel, drive) |
3326 		    ACER_UDMA_TIM(chp->channel, drive, 0x7));
3327 
3328 		/* add timing values, setup DMA if needed */
3329 		if ((drvp->drive_flags & DRIVE_DMA) == 0 &&
3330 		    (drvp->drive_flags & DRIVE_UDMA) == 0) {
3331 			acer_fifo_udma |=
3332 			    ACER_FTH_OPL(chp->channel, drive, 0x1);
3333 			goto pio;
3334 		}
3335 
3336 		acer_fifo_udma |= ACER_FTH_OPL(chp->channel, drive, 0x2);
3337 		if (drvp->drive_flags & DRIVE_UDMA) {
3338 			/* use Ultra/DMA */
3339 			drvp->drive_flags &= ~DRIVE_DMA;
3340 			acer_fifo_udma |= ACER_UDMA_EN(chp->channel, drive);
3341 			acer_fifo_udma |=
3342 			    ACER_UDMA_TIM(chp->channel, drive,
3343 				acer_udma[drvp->UDMA_mode]);
3344 			/* XXX disable if one drive < UDMA3 ? */
3345 			if (drvp->UDMA_mode >= 3) {
3346 				pciide_pci_write(sc->sc_pc, sc->sc_tag,
3347 				    ACER_0x4B,
3348 				    pciide_pci_read(sc->sc_pc, sc->sc_tag,
3349 				        ACER_0x4B) | ACER_0x4B_UDMA66);
3350 			}
3351 		} else {
3352 			/*
3353 			 * use Multiword DMA
3354 			 * Timings will be used for both PIO and DMA,
3355 			 * so adjust DMA mode if needed
3356 			 */
3357 			if (drvp->PIO_mode > (drvp->DMA_mode + 2))
3358 				drvp->PIO_mode = drvp->DMA_mode + 2;
3359 			if (drvp->DMA_mode + 2 > (drvp->PIO_mode))
3360 				drvp->DMA_mode = (drvp->PIO_mode > 2) ?
3361 				    drvp->PIO_mode - 2 : 0;
3362 			if (drvp->DMA_mode == 0)
3363 				drvp->PIO_mode = 0;
3364 		}
3365 		idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
3366 pio:		pciide_pci_write(sc->sc_pc, sc->sc_tag,
3367 		    ACER_IDETIM(chp->channel, drive),
3368 		    acer_pio[drvp->PIO_mode]);
3369 	}
3370 	WDCDEBUG_PRINT(("acer_setup_channel: new fifo/udma reg 0x%x\n",
3371 	    acer_fifo_udma), DEBUG_PROBE);
3372 	pci_conf_write(sc->sc_pc, sc->sc_tag, ACER_FTH_UDMA, acer_fifo_udma);
3373 	if (idedma_ctl != 0) {
3374 		/* Add software bits in status register */
3375 		bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
3376 		    IDEDMA_CTL, idedma_ctl);
3377 	}
3378 	pciide_print_modes(cp);
3379 }
3380 
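/*
 * Interrupt handler for the ALi/Acer controller: ACER_CHIDS flags the
 * channel(s) that posted the interrupt, so only those are passed to
 * wdcintr().  Compat channels have their own vector and are skipped.
 */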
3381 int
3382 acer_pci_intr(arg)
3383 	void *arg;
3384 {
3385 	struct pciide_softc *sc = arg;
3386 	struct pciide_channel *cp;
3387 	struct channel_softc *wdc_cp;
3388 	int i, rv, crv;
3389 	u_int32_t chids;
3390 
3391 	rv = 0;
3392 	chids = pciide_pci_read(sc->sc_pc, sc->sc_tag, ACER_CHIDS);
3393 	for (i = 0; i < sc->sc_wdcdev.nchannels; i++) {
3394 		cp = &sc->pciide_channels[i];
3395 		wdc_cp = &cp->wdc_channel;
3396 		/* If a compat channel, skip it. */
3397 		if (cp->compat)
3398 			continue;
3399 		if (chids & ACER_CHIDS_INT(i)) {
3400 			crv = wdcintr(wdc_cp);
3401 			if (crv == 0)
3402 				printf("%s:%d: bogus intr\n",
3403 				    sc->sc_wdcdev.sc_dev.dv_xname, i);
3404 			else
3405 				rv = 1;
3406 		}
3407 	}
3408 	return rv;
3409 }
3410 
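/*
 * Attach glue for the HighPoint HPT366/370.  The 366 puts each IDE
 * channel behind its own PCI function (one channel per softc), while
 * the 370/370A has both channels on a single function and supports
 * Ultra/DMA mode 5.
 */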
3411 void
3412 hpt_chip_map(sc, pa)
3413 	struct pciide_softc *sc;
3414 	struct pci_attach_args *pa;
3415 {
3416 	struct pciide_channel *cp;
3417 	int i, compatchan, revision;
3418 	pcireg_t interface;
3419 	bus_size_t cmdsize, ctlsize;
3420 
3421 	if (pciide_chipen(sc, pa) == 0)
3422 		return;
3423 	revision = PCI_REVISION(pa->pa_class);
3424 
3425 	/*
3426 	 * When the chip is in native mode it identifies itself as a
3427 	 * 'misc mass storage' device. Fake the interface in this case.
3428 	 */
3429 	if (PCI_SUBCLASS(pa->pa_class) == PCI_SUBCLASS_MASS_STORAGE_IDE) {
3430 		interface = PCI_INTERFACE(pa->pa_class);
3431 	} else {
3432 		interface = PCIIDE_INTERFACE_BUS_MASTER_DMA |
3433 		    PCIIDE_INTERFACE_PCI(0);
3434 		if (revision == HPT370_REV || revision == HPT370A_REV)
3435 			interface |= PCIIDE_INTERFACE_PCI(1);
3436 	}
3437 
3438 	printf(": DMA");
3439 	pciide_mapreg_dma(sc, pa);
3440 	printf("\n");
3441 	sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
3442 	    WDC_CAPABILITY_MODE;
3443 	if (sc->sc_dma_ok) {
3444 		sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_UDMA;
3445 		sc->sc_wdcdev.cap |= WDC_CAPABILITY_IRQACK;
3446 		sc->sc_wdcdev.irqack = pciide_irqack;
3447 	}
3448 	sc->sc_wdcdev.PIO_cap = 4;
3449 	sc->sc_wdcdev.DMA_cap = 2;
3450 
3451 	sc->sc_wdcdev.set_modes = hpt_setup_channel;
3452 	sc->sc_wdcdev.channels = sc->wdc_chanarray;
3453 	if (revision < HPT370_REV) {
3454 		sc->sc_wdcdev.UDMA_cap = 4;
3455 		/*
3456 		 * The 366 has 2 PCI IDE functions, one for primary and one
3457 		 * for secondary. So we need to call pciide_mapregs_compat()
3458 		 * with the real channel.
3459 		 */
3460 		if (pa->pa_function == 0) {
3461 			compatchan = 0;
3462 		} else if (pa->pa_function == 1) {
3463 			compatchan = 1;
3464 		} else {
3465 			printf("%s: unexpected PCI function %d\n",
3466 			    sc->sc_wdcdev.sc_dev.dv_xname, pa->pa_function);
3467 			return;
3468 		}
3469 		sc->sc_wdcdev.nchannels = 1;
3470 	} else {
3471 		sc->sc_wdcdev.nchannels = 2;
3472 		sc->sc_wdcdev.UDMA_cap = 5;
3473 	}
3474 	for (i = 0; i < sc->sc_wdcdev.nchannels; i++) {
3475 		cp = &sc->pciide_channels[i];
3476 		if (sc->sc_wdcdev.nchannels > 1) {
3477 			compatchan = i;
3478 			if((pciide_pci_read(sc->sc_pc, sc->sc_tag,
3479 			    HPT370_CTRL1(i)) & HPT370_CTRL1_EN) == 0) {
3480 				printf("%s: %s ignored (disabled)\n",
3481 				    sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
3482 				continue;
3483 			}
3484 		}
3485 		if (pciide_chansetup(sc, i, interface) == 0)
3486 			continue;
3487 		if (interface & PCIIDE_INTERFACE_PCI(i)) {
3488 			cp->hw_ok = pciide_mapregs_native(pa, cp, &cmdsize,
3489 			    &ctlsize, hpt_pci_intr);
3490 		} else {
3491 			cp->hw_ok = pciide_mapregs_compat(pa, cp, compatchan,
3492 			    &cmdsize, &ctlsize);
3493 		}
3494 		if (cp->hw_ok == 0)
3495 			return;
3496 		cp->wdc_channel.data32iot = cp->wdc_channel.cmd_iot;
3497 		cp->wdc_channel.data32ioh = cp->wdc_channel.cmd_ioh;
3498 		wdcattach(&cp->wdc_channel);
3499 		hpt_setup_channel(&cp->wdc_channel);
3500 	}
3501 	if (revision == HPT370_REV || revision == HPT370A_REV) {
3502 		/*
3503 		 * The HPT370 has a bit to disable interrupts; make sure
3504 		 * it is cleared.
3505 		 */
3506 		pciide_pci_write(sc->sc_pc, sc->sc_tag, HPT_CSEL,
3507 		    pciide_pci_read(sc->sc_pc, sc->sc_tag, HPT_CSEL) &
3508 		    ~HPT_CSEL_IRQDIS);
3509 	}
3510 	return;
3511 }
3512 
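/*
 * Per-channel mode setup: timing values come from the HPT370 or HPT366
 * tables depending on how many channels were attached, and Ultra/DMA is
 * clamped to mode 2 when the cable-ID bit is set for the channel
 * (apparently meaning no 80-wire cable).
 */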
3513 void
3514 hpt_setup_channel(chp)
3515 	struct channel_softc *chp;
3516 {
3517 	struct ata_drive_datas *drvp;
3518 	int drive;
3519 	int cable;
3520 	u_int32_t before, after;
3521 	u_int32_t idedma_ctl;
3522 	struct pciide_channel *cp = (struct pciide_channel*)chp;
3523 	struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
3524 
3525 	cable = pciide_pci_read(sc->sc_pc, sc->sc_tag, HPT_CSEL);
3526 
3527 	/* setup DMA if needed */
3528 	pciide_channel_dma_setup(cp);
3529 
3530 	idedma_ctl = 0;
3531 
3532 	/* Per drive settings */
3533 	for (drive = 0; drive < 2; drive++) {
3534 		drvp = &chp->ch_drive[drive];
3535 		/* If no drive, skip */
3536 		if ((drvp->drive_flags & DRIVE) == 0)
3537 			continue;
3538 		before = pci_conf_read(sc->sc_pc, sc->sc_tag,
3539 				       HPT_IDETIM(chp->channel, drive));
3540 
3541 		/* add timing values, setup DMA if needed */
3542 		if (drvp->drive_flags & DRIVE_UDMA) {
3543 			/* use Ultra/DMA */
3544 			drvp->drive_flags &= ~DRIVE_DMA;
3545 			if ((cable & HPT_CSEL_CBLID(chp->channel)) != 0 &&
3546 			    drvp->UDMA_mode > 2)
3547 				drvp->UDMA_mode = 2;
3548 			after = (sc->sc_wdcdev.nchannels == 2) ?
3549 			    hpt370_udma[drvp->UDMA_mode] :
3550 			    hpt366_udma[drvp->UDMA_mode];
3551 			idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
3552 		} else if (drvp->drive_flags & DRIVE_DMA) {
3553 			/*
3554 			 * use Multiword DMA.
3555 			 * Timings will be used for both PIO and DMA, so adjust
3556 			 * DMA mode if needed
3557 			 */
3558 			if (drvp->PIO_mode >= 3 &&
3559 			    (drvp->DMA_mode + 2) > drvp->PIO_mode) {
3560 				drvp->DMA_mode = drvp->PIO_mode - 2;
3561 			}
3562 			after = (sc->sc_wdcdev.nchannels == 2) ?
3563 			    hpt370_dma[drvp->DMA_mode] :
3564 			    hpt366_dma[drvp->DMA_mode];
3565 			idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
3566 		} else {
3567 			/* PIO only */
3568 			after = (sc->sc_wdcdev.nchannels == 2) ?
3569 			    hpt370_pio[drvp->PIO_mode] :
3570 			    hpt366_pio[drvp->PIO_mode];
3571 		}
3572 		pci_conf_write(sc->sc_pc, sc->sc_tag,
3573 		    HPT_IDETIM(chp->channel, drive), after);
3574 		WDCDEBUG_PRINT(("%s: bus speed register set to 0x%08x "
3575 		    "(BIOS 0x%08x)\n", sc->sc_wdcdev.sc_dev.dv_xname,
3576 		    after, before), DEBUG_PROBE);
3577 	}
3578 	if (idedma_ctl != 0) {
3579 		/* Add software bits in status register */
3580 		bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
3581 		    IDEDMA_CTL, idedma_ctl);
3582 	}
3583 	pciide_print_modes(cp);
3584 }
3585 
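/*
 * Interrupt handler: poll each channel's bus-master DMA status register
 * and only call wdcintr() for channels with the interrupt bit set;
 * bogus interrupts are acked by writing the status back.
 */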
3586 int
3587 hpt_pci_intr(arg)
3588 	void *arg;
3589 {
3590 	struct pciide_softc *sc = arg;
3591 	struct pciide_channel *cp;
3592 	struct channel_softc *wdc_cp;
3593 	int rv = 0;
3594 	int dmastat, i, crv;
3595 
3596 	for (i = 0; i < sc->sc_wdcdev.nchannels; i++) {
3597 		dmastat = bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh,
3598 		    IDEDMA_CTL + IDEDMA_SCH_OFFSET * i);
3599 		if ((dmastat & IDEDMA_CTL_INTR) == 0)
3600 			continue;
3601 		cp = &sc->pciide_channels[i];
3602 		wdc_cp = &cp->wdc_channel;
3603 		crv = wdcintr(wdc_cp);
3604 		if (crv == 0) {
3605 			printf("%s:%d: bogus intr\n",
3606 			    sc->sc_wdcdev.sc_dev.dv_xname, i);
3607 			bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
3608 			    IDEDMA_CTL + IDEDMA_SCH_OFFSET * i, dmastat);
3609 		} else
3610 			rv = 1;
3611 	}
3612 	return rv;
3613 }
3614 
3615 /* Macros to test product */
3616 #define PDC_IS_262(sc)							\
3617 	((sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_PDC20262 ||	\
3618 	(sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_PDC20265  ||	\
3619 	(sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_PDC20267  ||	\
3620 	(sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_PDC20268)
3621 #define PDC_IS_265(sc)							\
3622 	((sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_PDC20265 ||	\
3623 	(sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_PDC20267  ||	\
3624 	(sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_PDC20268)
3625 #define PDC_IS_268(sc)							\
3626 	((sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_PDC20268)
3627 
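/*
 * Attach glue for the Promise PDC202xx family.  Except on the PDC20268,
 * the controller state register (PDC2xx_STATE) is read early so RAID
 * mode can be turned off, and the updated state is written back once
 * the channels have been set up.
 */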
3628 void
3629 pdc202xx_chip_map(sc, pa)
3630 	struct pciide_softc *sc;
3631 	struct pci_attach_args *pa;
3632 {
3633 	struct pciide_channel *cp;
3634 	int channel;
3635 	pcireg_t interface, st, mode;
3636 	bus_size_t cmdsize, ctlsize;
3637 
3638 	if (!PDC_IS_268(sc)) {
3639 		st = pci_conf_read(sc->sc_pc, sc->sc_tag, PDC2xx_STATE);
3640 		WDCDEBUG_PRINT(("pdc202xx_setup_chip: "
3641 		    "controller state 0x%x\n", st), DEBUG_PROBE);
3642 	}
3643 	if (pciide_chipen(sc, pa) == 0)
3644 		return;
3645 
3646 	/* turn off RAID mode */
3647 	if (!PDC_IS_268(sc))
3648 		st &= ~PDC2xx_STATE_IDERAID;
3649 
3650 	/*
3651 	 * We can't rely on the PCI_CLASS_REG content if the chip was in
3652 	 * RAID mode; we have to fake the interface.
3653 	 */
3654 	interface = PCIIDE_INTERFACE_SETTABLE(0) | PCIIDE_INTERFACE_SETTABLE(1);
3655 	if (PDC_IS_268(sc) || st & PDC2xx_STATE_NATIVE)
3656 		interface |= PCIIDE_INTERFACE_PCI(0) | PCIIDE_INTERFACE_PCI(1);
3657 
3658 	printf(": DMA");
3659 	pciide_mapreg_dma(sc, pa);
3660 
3661 	sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
3662 	    WDC_CAPABILITY_MODE | WDC_CAPABILITY_NO_ATAPI_DMA;
3663 	if (sc->sc_dma_ok) {
3664 		sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_UDMA;
3665 		sc->sc_wdcdev.cap |= WDC_CAPABILITY_IRQACK;
3666 		sc->sc_wdcdev.irqack = pciide_irqack;
3667 	}
3668 	sc->sc_wdcdev.PIO_cap = 4;
3669 	sc->sc_wdcdev.DMA_cap = 2;
3670 	if (PDC_IS_265(sc))
3671 		sc->sc_wdcdev.UDMA_cap = 5;
3672 	else if (PDC_IS_262(sc))
3673 		sc->sc_wdcdev.UDMA_cap = 4;
3674 	else
3675 		sc->sc_wdcdev.UDMA_cap = 2;
3676 	sc->sc_wdcdev.set_modes = pdc202xx_setup_channel;
3677 	sc->sc_wdcdev.channels = sc->wdc_chanarray;
3678 	sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
3679 
3680 	pciide_print_channels(sc->sc_wdcdev.nchannels, interface);
3681 
3682 	if (PDC_IS_268(sc))
3683 		goto pdc268_doesnt_need_it;
3684 	/* setup failsafe defaults */
3685 	mode = 0;
3686 	mode = PDC2xx_TIM_SET_PA(mode, pdc2xx_pa[0]);
3687 	mode = PDC2xx_TIM_SET_PB(mode, pdc2xx_pb[0]);
3688 	mode = PDC2xx_TIM_SET_MB(mode, pdc2xx_dma_mb[0]);
3689 	mode = PDC2xx_TIM_SET_MC(mode, pdc2xx_dma_mc[0]);
3690 
3691 	for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
3692 		WDCDEBUG_PRINT(("pdc202xx_setup_chip: channel %d drive 0 "
3693 		    "initial timings  0x%x, now 0x%x\n", channel,
3694 		    pci_conf_read(sc->sc_pc, sc->sc_tag,
3695 		    PDC2xx_TIM(channel, 0)), mode | PDC2xx_TIM_IORDYp),
3696 		    DEBUG_PROBE);
3697 		pci_conf_write(sc->sc_pc, sc->sc_tag, PDC2xx_TIM(channel, 0),
3698 		    mode | PDC2xx_TIM_IORDYp);
3699 		WDCDEBUG_PRINT(("pdc202xx_setup_chip: channel %d drive 1 "
3700 		    "initial timings  0x%x, now 0x%x\n", channel,
3701 		    pci_conf_read(sc->sc_pc, sc->sc_tag,
3702 		    PDC2xx_TIM(channel, 1)), mode), DEBUG_PROBE);
3703 		pci_conf_write(sc->sc_pc, sc->sc_tag, PDC2xx_TIM(channel, 1),
3704 		    mode);
3705 	}
3706 
3707 	mode = PDC2xx_SCR_DMA;
3708 	if (PDC_IS_262(sc)) {
3709 		mode = PDC2xx_SCR_SET_GEN(mode, PDC262_SCR_GEN_LAT);
3710 	} else {
3711 		/* the BIOS set it up this way */
3712 		mode = PDC2xx_SCR_SET_GEN(mode, 0x1);
3713 	}
3714 	mode = PDC2xx_SCR_SET_I2C(mode, 0x3); /* ditto */
3715 	mode = PDC2xx_SCR_SET_POLL(mode, 0x1); /* ditto */
3716 	WDCDEBUG_PRINT(("pdc202xx_setup_chip: initial SCR  0x%x, now 0x%x\n",
3717 	    bus_space_read_4(sc->sc_dma_iot, sc->sc_dma_ioh, PDC2xx_SCR), mode),
3718 	    DEBUG_PROBE);
3719 	bus_space_write_4(sc->sc_dma_iot, sc->sc_dma_ioh, PDC2xx_SCR, mode);
3720 
3721 	/* controller initial state register is OK even without BIOS */
3722 	/* Set DMA mode to IDE DMA compatibility */
3723 	mode = bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh, PDC2xx_PM);
3724 	WDCDEBUG_PRINT(("pdc202xx_setup_chip: primary mode 0x%x", mode ),
3725 	    DEBUG_PROBE);
3726 	bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh, PDC2xx_PM,
3727 	    mode | 0x1);
3728 	mode = bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh, PDC2xx_SM);
3729 	WDCDEBUG_PRINT((", secondary mode 0x%x\n", mode ), DEBUG_PROBE);
3730 	bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh, PDC2xx_SM,
3731 	    mode | 0x1);
3732 
3733 pdc268_doesnt_need_it:
3734 	for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
3735 		cp = &sc->pciide_channels[channel];
3736 		if (pciide_chansetup(sc, channel, interface) == 0)
3737 			continue;
3738 		if (!PDC_IS_268(sc) && (st & (PDC_IS_262(sc) ?
3739 		    PDC262_STATE_EN(channel):PDC246_STATE_EN(channel))) == 0) {
3740 			printf("%s: %s ignored (disabled)\n",
3741 			    sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
3742 			continue;
3743 		}
3744 		pciide_map_compat_intr(pa, cp, channel, interface);
3745 		if (cp->hw_ok == 0)
3746 			continue;
3747 		if (PDC_IS_265(sc))
3748 			pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize,
3749 			    pdc20265_pci_intr);
3750 		else
3751 			pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize,
3752 			    pdc202xx_pci_intr);
3753 		if (cp->hw_ok == 0) {
3754 			pciide_unmap_compat_intr(pa, cp, channel, interface);
3755 			continue;
3756 		}
3757 		if (!PDC_IS_268(sc) && pciide_chan_candisable(cp)) {
3758 			st &= ~(PDC_IS_262(sc) ?
3759 			    PDC262_STATE_EN(channel):PDC246_STATE_EN(channel));
3760 			pciide_unmap_compat_intr(pa, cp, channel, interface);
3761 		}
3762 		pdc202xx_setup_channel(&cp->wdc_channel);
3763         }
3764 	if (!PDC_IS_268(sc)) {
3765 		WDCDEBUG_PRINT(("pdc202xx_setup_chip: "
3766 		"new controller state 0x%x\n", st), DEBUG_PROBE);
3767 		pci_conf_write(sc->sc_pc, sc->sc_tag, PDC2xx_STATE, st);
3768 	}
3769 	return;
3770 }
3771 
3772 void
3773 pdc202xx_setup_channel(chp)
3774 	struct channel_softc *chp;
3775 {
3776 	struct ata_drive_datas *drvp;
3777 	int drive;
3778 	pcireg_t mode, st;
3779 	u_int32_t idedma_ctl, scr, atapi;
3780 	struct pciide_channel *cp = (struct pciide_channel*)chp;
3781 	struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
3782 	int channel = chp->channel;
3783 
3784 	/* setup DMA if needed */
3785 	pciide_channel_dma_setup(cp);
3786 
3787 	if (PDC_IS_268(sc))
3788 		goto skip_for_pdc268;
3789 
3790 	idedma_ctl = 0;
3791 	WDCDEBUG_PRINT(("pdc202xx_setup_channel %s: scr 0x%x\n",
3792 	    sc->sc_wdcdev.sc_dev.dv_xname,
3793 	    bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh, PDC262_U66)),
3794 	    DEBUG_PROBE);
3795 
3796 	/* Per channel settings */
3797 	if (PDC_IS_262(sc)) {
3798 		scr = bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh,
3799 		    PDC262_U66);
3800 		st = pci_conf_read(sc->sc_pc, sc->sc_tag, PDC2xx_STATE);
3801 		/* Trim UDMA mode */
3802 		if ((st & PDC262_STATE_80P(channel)) != 0 ||
3803 		    (chp->ch_drive[0].drive_flags & DRIVE_UDMA &&
3804 		    chp->ch_drive[0].UDMA_mode <= 2) ||
3805 		    (chp->ch_drive[1].drive_flags & DRIVE_UDMA &&
3806 		    chp->ch_drive[1].UDMA_mode <= 2)) {
3807 			if (chp->ch_drive[0].UDMA_mode > 2)
3808 				chp->ch_drive[0].UDMA_mode = 2;
3809 			if (chp->ch_drive[1].UDMA_mode > 2)
3810 				chp->ch_drive[1].UDMA_mode = 2;
3811 		}
3812 		/* Set U66 if needed */
3813 		if ((chp->ch_drive[0].drive_flags & DRIVE_UDMA &&
3814 		    chp->ch_drive[0].UDMA_mode > 2) ||
3815 		    (chp->ch_drive[1].drive_flags & DRIVE_UDMA &&
3816 		    chp->ch_drive[1].UDMA_mode > 2))
3817 			scr |= PDC262_U66_EN(channel);
3818 		else
3819 			scr &= ~PDC262_U66_EN(channel);
3820 		bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
3821 		    PDC262_U66, scr);
3822 		WDCDEBUG_PRINT(("pdc202xx_setup_channel %s:%d: ATAPI 0x%x\n",
3823 		    sc->sc_wdcdev.sc_dev.dv_xname, channel,
3824 		    bus_space_read_4(sc->sc_dma_iot, sc->sc_dma_ioh,
3825 		    PDC262_ATAPI(channel))), DEBUG_PROBE);
3826 		if (chp->ch_drive[0].drive_flags & DRIVE_ATAPI ||
3827 		    chp->ch_drive[1].drive_flags & DRIVE_ATAPI) {
3828 			if (((chp->ch_drive[0].drive_flags & DRIVE_UDMA) &&
3829 			    !(chp->ch_drive[1].drive_flags & DRIVE_UDMA) &&
3830 			    (chp->ch_drive[1].drive_flags & DRIVE_DMA)) ||
3831 			    ((chp->ch_drive[1].drive_flags & DRIVE_UDMA) &&
3832 			    !(chp->ch_drive[0].drive_flags & DRIVE_UDMA) &&
3833 			    (chp->ch_drive[0].drive_flags & DRIVE_DMA)))
3834 				atapi = 0;
3835 			else
3836 				atapi = PDC262_ATAPI_UDMA;
3837 			bus_space_write_4(sc->sc_dma_iot, sc->sc_dma_ioh,
3838 			    PDC262_ATAPI(channel), atapi);
3839 		}
3840 	}
3841 	for (drive = 0; drive < 2; drive++) {
3842 		drvp = &chp->ch_drive[drive];
3843 		/* If no drive, skip */
3844 		if ((drvp->drive_flags & DRIVE) == 0)
3845 			continue;
3846 		mode = 0;
3847 		if (drvp->drive_flags & DRIVE_UDMA) {
3848 			/* use Ultra/DMA */
3849 			drvp->drive_flags &= ~DRIVE_DMA;
3850 			mode = PDC2xx_TIM_SET_MB(mode,
3851 			   pdc2xx_udma_mb[drvp->UDMA_mode]);
3852 			mode = PDC2xx_TIM_SET_MC(mode,
3853 			   pdc2xx_udma_mc[drvp->UDMA_mode]);
3855 			idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
3856 		} else if (drvp->drive_flags & DRIVE_DMA) {
3857 			mode = PDC2xx_TIM_SET_MB(mode,
3858 			    pdc2xx_dma_mb[drvp->DMA_mode]);
3859 			mode = PDC2xx_TIM_SET_MC(mode,
3860 			   pdc2xx_dma_mc[drvp->DMA_mode]);
3861 			idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
3862 		} else {
3863 			mode = PDC2xx_TIM_SET_MB(mode,
3864 			    pdc2xx_dma_mb[0]);
3865 			mode = PDC2xx_TIM_SET_MC(mode,
3866 			    pdc2xx_dma_mc[0]);
3867 		}
3868 		mode = PDC2xx_TIM_SET_PA(mode, pdc2xx_pa[drvp->PIO_mode]);
3869 		mode = PDC2xx_TIM_SET_PB(mode, pdc2xx_pb[drvp->PIO_mode]);
3870 		if (drvp->drive_flags & DRIVE_ATA)
3871 			mode |= PDC2xx_TIM_PRE;
3872 		mode |= PDC2xx_TIM_SYNC | PDC2xx_TIM_ERRDY;
3873 		if (drvp->PIO_mode >= 3) {
3874 			mode |= PDC2xx_TIM_IORDY;
3875 			if (drive == 0)
3876 				mode |= PDC2xx_TIM_IORDYp;
3877 		}
3878 		WDCDEBUG_PRINT(("pdc202xx_setup_channel: %s:%d:%d "
3879 		    "timings 0x%x\n",
3880 		    sc->sc_wdcdev.sc_dev.dv_xname,
3881 		    chp->channel, drive, mode), DEBUG_PROBE);
3882 		pci_conf_write(sc->sc_pc, sc->sc_tag,
3883 		    PDC2xx_TIM(chp->channel, drive), mode);
3884 	}
3885 	if (idedma_ctl != 0) {
3886 		/* Add software bits in status register */
3887 		bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
3888 		    IDEDMA_CTL, idedma_ctl);
3889 	}
3890 
3891 skip_for_pdc268:
3892 	pciide_print_modes(cp);
3893 }
3894 
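/*
 * Native-mode interrupt handler: PDC2xx_SCR has a per-channel interrupt
 * bit, so only channels that flagged an interrupt are handed to
 * wdcintr().
 */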
3895 int
3896 pdc202xx_pci_intr(arg)
3897 	void *arg;
3898 {
3899 	struct pciide_softc *sc = arg;
3900 	struct pciide_channel *cp;
3901 	struct channel_softc *wdc_cp;
3902 	int i, rv, crv;
3903 	u_int32_t scr;
3904 
3905 	rv = 0;
3906 	scr = bus_space_read_4(sc->sc_dma_iot, sc->sc_dma_ioh, PDC2xx_SCR);
3907 	for (i = 0; i < sc->sc_wdcdev.nchannels; i++) {
3908 		cp = &sc->pciide_channels[i];
3909 		wdc_cp = &cp->wdc_channel;
3910 		/* If a compat channel, skip it. */
3911 		if (cp->compat)
3912 			continue;
3913 		if (scr & PDC2xx_SCR_INT(i)) {
3914 			crv = wdcintr(wdc_cp);
3915 			if (crv == 0)
3916 				printf("%s:%d: bogus intr (reg 0x%x)\n",
3917 				    sc->sc_wdcdev.sc_dev.dv_xname, i, scr);
3918 			else
3919 				rv = 1;
3920 		}
3921         }
3922 	return rv;
3923 }
3924 
3925 int
3926 pdc20265_pci_intr(arg)
3927 	void *arg;
3928 {
3929 	struct pciide_softc *sc = arg;
3930 	struct pciide_channel *cp;
3931 	struct channel_softc *wdc_cp;
3932 	int i, rv, crv;
3933 	u_int32_t dmastat;
3934 
3935 	rv = 0;
3936 	for (i = 0; i < sc->sc_wdcdev.nchannels; i++) {
3937 		cp = &sc->pciide_channels[i];
3938 		wdc_cp = &cp->wdc_channel;
3939 		/* If a compat channel, skip it. */
3940 		if (cp->compat)
3941 			continue;
3942 		/*
3943 		 * The Ultra/100 seems to assert PDC2xx_SCR_INT spuriously,
3944 		 * however it asserts INT in IDEDMA_CTL even for non-DMA ops.
3945 		 * So use it instead (requires 2 reg reads instead of 1,
3946 		 * but we can't do it another way).
3947 		 */
3948 		dmastat = bus_space_read_1(sc->sc_dma_iot,
3949 		    sc->sc_dma_ioh, IDEDMA_CTL + IDEDMA_SCH_OFFSET * i);
3950 		if ((dmastat & IDEDMA_CTL_INTR) == 0)
3951 			continue;
3952 
3953 		crv = wdcintr(wdc_cp);
3954 		if (crv == 0)
3955 			printf("%s:%d: bogus intr\n",
3956 			    sc->sc_wdcdev.sc_dev.dv_xname, i);
3957 		else
3958 			rv = 1;
3959 	}
3960 	return rv;
3961 }
3962 
3963 /*
3964  * Inline functions for accessing the timing registers of the
3965  * OPTi controller.
3966  *
3967  * These *MUST* disable interrupts as they need atomic access to
3968  * certain magic registers. Failure to adhere to this *will*
3969  * break things in subtle ways if the wdc registers are accessed
3970  * by an interrupt routine while this magic sequence is executing.
3971  */
3972 static __inline__ u_int8_t
3973 opti_read_config(struct channel_softc *chp, int reg)
3974 {
3975 	u_int8_t rv;
3976 	int s = splhigh();
3977 
3978 	/* Two consecutive 16-bit reads from register #1 (0x1f1/0x171) */
3979 	(void) bus_space_read_2(chp->cmd_iot, chp->cmd_ioh, wdr_features);
3980 	(void) bus_space_read_2(chp->cmd_iot, chp->cmd_ioh, wdr_features);
3981 
3982 	/* Followed by an 8-bit write of 0x3 to register #2 */
3983 	bus_space_write_1(chp->cmd_iot, chp->cmd_ioh, wdr_seccnt, 0x03u);
3984 
3985 	/* Now we can read the required register */
3986 	rv = bus_space_read_1(chp->cmd_iot, chp->cmd_ioh, reg);
3987 
3988 	/* Restore the real registers */
3989 	bus_space_write_1(chp->cmd_iot, chp->cmd_ioh, wdr_seccnt, 0x83u);
3990 
3991 	splx(s);
3992 
3993 	return rv;
3994 }
3995 
3996 static __inline__ void
3997 opti_write_config(struct channel_softc *chp, int reg, u_int8_t val)
3998 {
3999 	int s = splhigh();
4000 
4001 	/* Two consecutive 16-bit reads from register #1 (0x1f1/0x171) */
4002 	(void) bus_space_read_2(chp->cmd_iot, chp->cmd_ioh, wdr_features);
4003 	(void) bus_space_read_2(chp->cmd_iot, chp->cmd_ioh, wdr_features);
4004 
4005 	/* Followed by an 8-bit write of 0x3 to register #2 */
4006 	bus_space_write_1(chp->cmd_iot, chp->cmd_ioh, wdr_seccnt, 0x03u);
4007 
4008 	/* Now we can write the required register */
4009 	bus_space_write_1(chp->cmd_iot, chp->cmd_ioh, reg, val);
4010 
4011 	/* Restore the real registers */
4012 	bus_space_write_1(chp->cmd_iot, chp->cmd_ioh, wdr_seccnt, 0x83u);
4013 
4014 	splx(s);
4015 }
4016 
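/*
 * Attach glue for OPTi controllers.  DMA is left disabled on chip
 * revisions <= 0x12 (see the kludge below), and the second channel may
 * have been disabled by firmware via OPTI_REG_INIT_CONTROL.
 */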
4017 void
4018 opti_chip_map(sc, pa)
4019 	struct pciide_softc *sc;
4020 	struct pci_attach_args *pa;
4021 {
4022 	struct pciide_channel *cp;
4023 	bus_size_t cmdsize, ctlsize;
4024 	pcireg_t interface;
4025 	u_int8_t init_ctrl;
4026 	int channel;
4027 
4028 	if (pciide_chipen(sc, pa) == 0)
4029 		return;
4030 	printf(": DMA");
4031 	/*
4032 	 * XXXSCW:
4033 	 * There seem to be a couple of buggy revisions/implementations
4034 	 * of the OPTi pciide chipset. This kludge seems to fix one of
4035 	 * the reported problems (NetBSD PR/11644) but still fails for the
4036 	 * other (NetBSD PR/13151), although the latter may be due to other
4037 	 * issues too...
4038 	 */
4039 	if (PCI_REVISION(pa->pa_class) <= 0x12) {
4040 		printf(" (disabled)");
4041 		sc->sc_dma_ok = 0;
4042 		sc->sc_wdcdev.cap = 0;
4043 	} else {
4044 		sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA32;
4045 		pciide_mapreg_dma(sc, pa);
4046 	}
4047 
4048 	sc->sc_wdcdev.cap |= WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_MODE;
4049 	sc->sc_wdcdev.PIO_cap = 4;
4050 	if (sc->sc_dma_ok) {
4051 		sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_IRQACK;
4052 		sc->sc_wdcdev.irqack = pciide_irqack;
4053 		sc->sc_wdcdev.DMA_cap = 2;
4054 	}
4055 	sc->sc_wdcdev.set_modes = opti_setup_channel;
4056 
4057 	sc->sc_wdcdev.channels = sc->wdc_chanarray;
4058 	sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;
4059 
4060 	init_ctrl = pciide_pci_read(sc->sc_pc, sc->sc_tag,
4061 	    OPTI_REG_INIT_CONTROL);
4062 
4063 	interface = PCI_INTERFACE(pa->pa_class);
4064 
4065         pciide_print_channels(sc->sc_wdcdev.nchannels, interface);
4066 
4067 	for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
4068 		cp = &sc->pciide_channels[channel];
4069 		if (pciide_chansetup(sc, channel, interface) == 0)
4070 			continue;
4071 		if (channel == 1 &&
4072 		    (init_ctrl & OPTI_INIT_CONTROL_CH2_DISABLE) != 0) {
4073 			printf("%s: %s channel ignored (disabled)\n",
4074 			    sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
4075 			continue;
4076 		}
4077 		pciide_map_compat_intr(pa, cp, channel, interface);
4078 		if (cp->hw_ok == 0)
4079 			continue;
4080 		pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize,
4081 		    pciide_pci_intr);
4082 		if (cp->hw_ok == 0) {
4083 			pciide_unmap_compat_intr(pa, cp, channel, interface);
4084 			continue;
4085 		}
4086 		opti_setup_channel(&cp->wdc_channel);
4087 	}
4088 }
4089 
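/*
 * Collapse each drive's settings into a single mode index (PIO mode
 * 0-4, or DMA mode + 5), make both drives use the same `Address Setup
 * Time', then program the timing registers through the banked
 * opti_read_config()/opti_write_config() accessors above.
 */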
4090 void
4091 opti_setup_channel(chp)
4092 	struct channel_softc *chp;
4093 {
4094 	struct ata_drive_datas *drvp;
4095 	struct pciide_channel *cp = (struct pciide_channel*)chp;
4096 	struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
4097 	int drive,spd;
4098 	int mode[2];
4099 	u_int8_t rv, mr;
4100 
4101 	/*
4102 	 * The `Delay' and `Address Setup Time' fields of the
4103 	 * Miscellaneous Register are always zero initially.
4104 	 */
4105 	mr = opti_read_config(chp, OPTI_REG_MISC) & ~OPTI_MISC_INDEX_MASK;
4106 	mr &= ~(OPTI_MISC_DELAY_MASK |
4107 		OPTI_MISC_ADDR_SETUP_MASK |
4108 		OPTI_MISC_INDEX_MASK);
4109 
4110 	/* Prime the control register before setting timing values */
4111 	opti_write_config(chp, OPTI_REG_CONTROL, OPTI_CONTROL_DISABLE);
4112 
4113 	/* Determine the clock rate of the PCI bus the chip is attached to */
4114 	spd = (int) opti_read_config(chp, OPTI_REG_STRAP);
4115 	spd &= OPTI_STRAP_PCI_SPEED_MASK;
4116 
4117 	/* setup DMA if needed */
4118 	pciide_channel_dma_setup(cp);
4119 
4120 	for (drive = 0; drive < 2; drive++) {
4121 		drvp = &chp->ch_drive[drive];
4122 		/* If no drive, skip */
4123 		if ((drvp->drive_flags & DRIVE) == 0) {
4124 			mode[drive] = -1;
4125 			continue;
4126 		}
4127 
4128 		if ((drvp->drive_flags & DRIVE_DMA)) {
4129 			/*
4130 			 * Timings will be used for both PIO and DMA,
4131 			 * so adjust DMA mode if needed
4132 			 */
4133 			if (drvp->PIO_mode > (drvp->DMA_mode + 2))
4134 				drvp->PIO_mode = drvp->DMA_mode + 2;
4135 			if (drvp->DMA_mode + 2 > (drvp->PIO_mode))
4136 				drvp->DMA_mode = (drvp->PIO_mode > 2) ?
4137 				    drvp->PIO_mode - 2 : 0;
4138 			if (drvp->DMA_mode == 0)
4139 				drvp->PIO_mode = 0;
4140 
4141 			mode[drive] = drvp->DMA_mode + 5;
4142 		} else
4143 			mode[drive] = drvp->PIO_mode;
4144 
4145 		if (drive && mode[0] >= 0 &&
4146 		    (opti_tim_as[spd][mode[0]] != opti_tim_as[spd][mode[1]])) {
4147 			/*
4148 			 * Can't have two drives using different values
4149 			 * for `Address Setup Time'.
4150 			 * Slow down the faster drive to compensate.
4151 			 */
4152 			int d = (opti_tim_as[spd][mode[0]] >
4153 				 opti_tim_as[spd][mode[1]]) ?  0 : 1;
4154 
4155 			mode[d] = mode[1-d];
4156 			chp->ch_drive[d].PIO_mode = chp->ch_drive[1-d].PIO_mode;
4157 			chp->ch_drive[d].DMA_mode = 0;
4158 			chp->ch_drive[d].drive_flags &= ~DRIVE_DMA;
4159 		}
4160 	}
4161 
4162 	for (drive = 0; drive < 2; drive++) {
4163 		int m;
4164 		if ((m = mode[drive]) < 0)
4165 			continue;
4166 
4167 		/* Set the Address Setup Time and select appropriate index */
4168 		rv = opti_tim_as[spd][m] << OPTI_MISC_ADDR_SETUP_SHIFT;
4169 		rv |= OPTI_MISC_INDEX(drive);
4170 		opti_write_config(chp, OPTI_REG_MISC, mr | rv);
4171 
4172 		/* Set the pulse width and recovery timing parameters */
4173 		rv  = opti_tim_cp[spd][m] << OPTI_PULSE_WIDTH_SHIFT;
4174 		rv |= opti_tim_rt[spd][m] << OPTI_RECOVERY_TIME_SHIFT;
4175 		opti_write_config(chp, OPTI_REG_READ_CYCLE_TIMING, rv);
4176 		opti_write_config(chp, OPTI_REG_WRITE_CYCLE_TIMING, rv);
4177 
4178 		/* Set the Enhanced Mode register appropriately */
4179 	    	rv = pciide_pci_read(sc->sc_pc, sc->sc_tag, OPTI_REG_ENH_MODE);
4180 		rv &= ~OPTI_ENH_MODE_MASK(chp->channel, drive);
4181 		rv |= OPTI_ENH_MODE(chp->channel, drive, opti_tim_em[m]);
4182 		pciide_pci_write(sc->sc_pc, sc->sc_tag, OPTI_REG_ENH_MODE, rv);
4183 	}
4184 
4185 	/* Finally, enable the timings */
4186 	opti_write_config(chp, OPTI_REG_CONTROL, OPTI_CONTROL_ENABLE);
4187 
4188 	pciide_print_modes(cp);
4189 }
4190 
4191 
4192 #define	ACARD_IS_850(sc)							\
4193 	((sc)->sc_pp->ide_product == PCI_PRODUCT_ACARD_ATP850U)
4194 
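/*
 * Attach glue for the Acard ATP850 (Ultra/33) and ATP860 (Ultra/66)
 * controllers.  On the non-850 parts the interrupt control bit in
 * ATP8x0_CTRL is cleared once both channels are attached.
 */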
4195 void
4196 acard_chip_map(sc, pa)
4197 	struct pciide_softc *sc;
4198 	struct pci_attach_args *pa;
4199 {
4200 	struct pciide_channel *cp;
4201 	int i;
4202 	pcireg_t interface;
4203 	bus_size_t cmdsize, ctlsize;
4204 
4205 	if (pciide_chipen(sc, pa) == 0)
4206 		return;
4207 
4208 	/*
4209 	 * When the chip is in native mode it identifies itself as a
4210 	 * 'misc mass storage' device. Fake the interface in this case.
4211 	 */
4212 	if (PCI_SUBCLASS(pa->pa_class) == PCI_SUBCLASS_MASS_STORAGE_IDE) {
4213 		interface = PCI_INTERFACE(pa->pa_class);
4214 	} else {
4215 		interface = PCIIDE_INTERFACE_BUS_MASTER_DMA |
4216 		    PCIIDE_INTERFACE_PCI(0) | PCIIDE_INTERFACE_PCI(1);
4217 	}
4218 
4219 	printf(": DMA");
4220 	pciide_mapreg_dma(sc, pa);
4221 	printf("\n");
4222 	sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
4223 	    WDC_CAPABILITY_MODE;
4224 
4225 	if (sc->sc_dma_ok) {
4226 		sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_UDMA;
4227 		sc->sc_wdcdev.cap |= WDC_CAPABILITY_IRQACK;
4228 		sc->sc_wdcdev.irqack = pciide_irqack;
4229 	}
4230 	sc->sc_wdcdev.PIO_cap = 4;
4231 	sc->sc_wdcdev.DMA_cap = 2;
4232 	sc->sc_wdcdev.UDMA_cap = ACARD_IS_850(sc) ? 2 : 4;
4233 
4234 	sc->sc_wdcdev.set_modes = acard_setup_channel;
4235 	sc->sc_wdcdev.channels = sc->wdc_chanarray;
4236 	sc->sc_wdcdev.nchannels = 2;
4237 
4238 	for (i = 0; i < sc->sc_wdcdev.nchannels; i++) {
4239 		cp = &sc->pciide_channels[i];
4240 		if (pciide_chansetup(sc, i, interface) == 0)
4241 			continue;
4242 		if (interface & PCIIDE_INTERFACE_PCI(i)) {
4243 			cp->hw_ok = pciide_mapregs_native(pa, cp, &cmdsize,
4244 			    &ctlsize, pciide_pci_intr);
4245 		} else {
4246 			cp->hw_ok = pciide_mapregs_compat(pa, cp, i,
4247 			    &cmdsize, &ctlsize);
4248 		}
4249 		if (cp->hw_ok == 0)
4250 			return;
4251 		cp->wdc_channel.data32iot = cp->wdc_channel.cmd_iot;
4252 		cp->wdc_channel.data32ioh = cp->wdc_channel.cmd_ioh;
4253 		wdcattach(&cp->wdc_channel);
4254 		acard_setup_channel(&cp->wdc_channel);
4255 	}
4256 	if (!ACARD_IS_850(sc)) {
4257 		u_int32_t reg;
4258 		reg = pci_conf_read(sc->sc_pc, sc->sc_tag, ATP8x0_CTRL);
4259 		reg &= ~ATP860_CTRL_INT;
4260 		pci_conf_write(sc->sc_pc, sc->sc_tag, ATP8x0_CTRL, reg);
4261 	}
4262 }
4263 
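/*
 * Per-channel mode setup: the ATP850 and ATP860 use different timing
 * and UDMA register layouts (ATP850_IDETIME/ATP850_UDMA vs.
 * ATP860_IDETIME/ATP860_UDMA), so each path is handled separately.
 */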
4264 void
4265 acard_setup_channel(chp)
4266 	struct channel_softc *chp;
4267 {
4268 	struct ata_drive_datas *drvp;
4269 	struct pciide_channel *cp = (struct pciide_channel*)chp;
4270 	struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
4271 	int channel = chp->channel;
4272 	int drive;
4273 	u_int32_t idetime, udma_mode;
4274 	u_int32_t idedma_ctl;
4275 
4276 	/* setup DMA if needed */
4277 	pciide_channel_dma_setup(cp);
4278 
4279 	if (ACARD_IS_850(sc)) {
4280 		idetime = 0;
4281 		udma_mode = pci_conf_read(sc->sc_pc, sc->sc_tag, ATP850_UDMA);
4282 		udma_mode &= ~ATP850_UDMA_MASK(channel);
4283 	} else {
4284 		idetime = pci_conf_read(sc->sc_pc, sc->sc_tag, ATP860_IDETIME);
4285 		idetime &= ~ATP860_SETTIME_MASK(channel);
4286 		udma_mode = pci_conf_read(sc->sc_pc, sc->sc_tag, ATP860_UDMA);
4287 		udma_mode &= ~ATP860_UDMA_MASK(channel);
4288 	}
4289 
4290 	idedma_ctl = 0;
4291 
4292 	/* Per drive settings */
4293 	for (drive = 0; drive < 2; drive++) {
4294 		drvp = &chp->ch_drive[drive];
4295 		/* If no drive, skip */
4296 		if ((drvp->drive_flags & DRIVE) == 0)
4297 			continue;
4298 		/* add timing values, setup DMA if needed */
4299 		if ((chp->wdc->cap & WDC_CAPABILITY_UDMA) &&
4300 		    (drvp->drive_flags & DRIVE_UDMA)) {
4301 			/* use Ultra/DMA */
4302 			if (ACARD_IS_850(sc)) {
4303 				idetime |= ATP850_SETTIME(drive,
4304 				    acard_act_udma[drvp->UDMA_mode],
4305 				    acard_rec_udma[drvp->UDMA_mode]);
4306 				udma_mode |= ATP850_UDMA_MODE(channel, drive,
4307 				    acard_udma_conf[drvp->UDMA_mode]);
4308 			} else {
4309 				idetime |= ATP860_SETTIME(channel, drive,
4310 				    acard_act_udma[drvp->UDMA_mode],
4311 				    acard_rec_udma[drvp->UDMA_mode]);
4312 				udma_mode |= ATP860_UDMA_MODE(channel, drive,
4313 				    acard_udma_conf[drvp->UDMA_mode]);
4314 			}
4315 			idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
4316 		} else if ((chp->wdc->cap & WDC_CAPABILITY_DMA) &&
4317 		    (drvp->drive_flags & DRIVE_DMA)) {
4318 			/* use Multiword DMA */
4319 			drvp->drive_flags &= ~DRIVE_UDMA;
4320 			if (ACARD_IS_850(sc)) {
4321 				idetime |= ATP850_SETTIME(drive,
4322 				    acard_act_dma[drvp->DMA_mode],
4323 				    acard_rec_dma[drvp->DMA_mode]);
4324 			} else {
4325 				idetime |= ATP860_SETTIME(channel, drive,
4326 				    acard_act_dma[drvp->DMA_mode],
4327 				    acard_rec_dma[drvp->DMA_mode]);
4328 			}
4329 			idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
4330 		} else {
4331 			/* PIO only */
4332 			drvp->drive_flags &= ~(DRIVE_UDMA | DRIVE_DMA);
4333 			if (ACARD_IS_850(sc)) {
4334 				idetime |= ATP850_SETTIME(drive,
4335 				    acard_act_pio[drvp->PIO_mode],
4336 				    acard_rec_pio[drvp->PIO_mode]);
4337 			} else {
4338 				idetime |= ATP860_SETTIME(channel, drive,
4339 				    acard_act_pio[drvp->PIO_mode],
4340 				    acard_rec_pio[drvp->PIO_mode]);
4341 			}
4342 			pci_conf_write(sc->sc_pc, sc->sc_tag, ATP8x0_CTRL,
4343 			    pci_conf_read(sc->sc_pc, sc->sc_tag, ATP8x0_CTRL)
4344 			    | ATP8x0_CTRL_EN(channel));
4345 		}
4346 	}
4347 
4348 	if (idedma_ctl != 0) {
4349 		/* Add software bits in status register */
4350 		bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
4351 		    IDEDMA_CTL + IDEDMA_SCH_OFFSET * channel, idedma_ctl);
4352 	}
4353 	pciide_print_modes(cp);
4354 
4355 	if (ACARD_IS_850(sc)) {
4356 		pci_conf_write(sc->sc_pc, sc->sc_tag,
4357 		    ATP850_IDETIME(channel), idetime);
4358 		pci_conf_write(sc->sc_pc, sc->sc_tag, ATP850_UDMA, udma_mode);
4359 	} else {
4360 		pci_conf_write(sc->sc_pc, sc->sc_tag, ATP860_IDETIME, idetime);
4361 		pci_conf_write(sc->sc_pc, sc->sc_tag, ATP860_UDMA, udma_mode);
4362 	}
4363 }
4364 
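/*
 * Interrupt handler: channels whose DMA status shows no pending
 * interrupt are skipped.  If an interrupt arrives while no command is
 * outstanding (WDCF_IRQ_WAIT clear), it is acked by writing the status
 * back after letting wdcintr() have a look.
 */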
4365 int
4366 acard_pci_intr(arg)
4367 	void *arg;
4368 {
4369 	struct pciide_softc *sc = arg;
4370 	struct pciide_channel *cp;
4371 	struct channel_softc *wdc_cp;
4372 	int rv = 0;
4373 	int dmastat, i, crv;
4374 
4375 	for (i = 0; i < sc->sc_wdcdev.nchannels; i++) {
4376 		dmastat = bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh,
4377 		    IDEDMA_CTL + IDEDMA_SCH_OFFSET * i);
4378 		if ((dmastat & IDEDMA_CTL_INTR) == 0)
4379 			continue;
4380 		cp = &sc->pciide_channels[i];
4381 		wdc_cp = &cp->wdc_channel;
4382 		if ((wdc_cp->ch_flags & WDCF_IRQ_WAIT) == 0) {
4383 			(void)wdcintr(wdc_cp);
4384 			bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
4385 			    IDEDMA_CTL + IDEDMA_SCH_OFFSET * i, dmastat);
4386 			continue;
4387 		}
4388 		crv = wdcintr(wdc_cp);
4389 		if (crv == 0)
4390 			printf("%s:%d: bogus intr\n",
4391 			    sc->sc_wdcdev.sc_dev.dv_xname, i);
4392 		else if (crv == 1)
4393 			rv = 1;
4394 		else if (rv == 0)
4395 			rv = crv;
4396 	}
4397 	return rv;
4398 }
4399