xref: /netbsd-src/sys/dev/pci/pci.c (revision eb7c1594f145c931049e1fd9eb056a5987e87e59)
/*	$NetBSD: pci.c,v 1.80 2003/06/15 23:09:09 fvdl Exp $	*/

/*
 * Copyright (c) 1995, 1996, 1997, 1998
 *     Christopher G. Demetriou.  All rights reserved.
 * Copyright (c) 1994 Charles M. Hannum.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Charles M. Hannum.
 * 4. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * PCI bus autoconfiguration.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: pci.c,v 1.80 2003/06/15 23:09:09 fvdl Exp $");

#include "opt_pci.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/device.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pcidevs.h>

#include <uvm/uvm_extern.h>

#include "locators.h"

#ifdef PCI_CONFIG_DUMP
int pci_config_dump = 1;
#else
int pci_config_dump = 0;
#endif

int pcimatch __P((struct device *, struct cfdata *, void *));
void pciattach __P((struct device *, struct device *, void *));

CFATTACH_DECL(pci, sizeof(struct pci_softc),
    pcimatch, pciattach, NULL, NULL);

int	pciprint __P((void *, const char *));
int	pcisubmatch __P((struct device *, struct cfdata *, void *));

/*
 * Important note about PCI-ISA bridges:
 *
 * Callbacks are used to configure these devices so that ISA/EISA bridges
 * can attach their child busses after PCI configuration is done.
 *
 * This works because:
 *	(1) there can be at most one ISA/EISA bridge per PCI bus, and
 *	(2) any ISA/EISA bridges must be attached to primary PCI
 *	    busses (i.e. bus zero).
 *
 * That boils down to: there can only be one of these outstanding
 * at a time, it is cleared when configuring PCI bus 0 before any
 * subdevices have been found, and it is run after all subdevices
 * of PCI bus 0 have been found.
 *
 * This is needed because there are some (legacy) PCI devices which
 * can show up as ISA/EISA devices as well (the prime example of which
 * are VGA controllers).  If you attach ISA from a PCI-ISA/EISA bridge,
 * and the bridge is seen before the video board is, the board can show
 * up as an ISA device, and that can (bogusly) complicate the PCI device's
 * attach code, or make the PCI device not be properly attached at all.
 *
 * We use the generic config_defer() facility to achieve this.
 */
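/*
 * Illustrative sketch only (the "xbridge" driver and function names below
 * are hypothetical): a PCI-ISA bridge driver typically defers attachment of
 * its child bus via config_defer(), so the callback runs only after all PCI
 * devices on bus 0 have been found:
 *
 *	void
 *	xbridge_attach(struct device *parent, struct device *self, void *aux)
 *	{
 *		... remember whatever the callback will need ...
 *		config_defer(self, xbridge_callback);
 *	}
 *
 *	static void
 *	xbridge_callback(struct device *self)
 *	{
 *		... attach the ISA/EISA bus here with config_found() ...
 *	}
 */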

int
pcimatch(parent, cf, aux)
	struct device *parent;
	struct cfdata *cf;
	void *aux;
{
	struct pcibus_attach_args *pba = aux;

	if (strcmp(pba->pba_busname, cf->cf_name))
		return (0);

	/* Check the locators */
	if (cf->pcibuscf_bus != PCIBUS_UNK_BUS &&
	    cf->pcibuscf_bus != pba->pba_bus)
		return (0);

	/* sanity */
	if (pba->pba_bus < 0 || pba->pba_bus > 255)
		return (0);

	/*
	 * XXX check other (hardware?) indicators
	 */

	return (1);
}

void
pciattach(parent, self, aux)
	struct device *parent, *self;
	void *aux;
{
	struct pcibus_attach_args *pba = aux;
	struct pci_softc *sc = (struct pci_softc *)self;
	int io_enabled, mem_enabled, mrl_enabled, mrm_enabled, mwi_enabled;
	const char *sep = "";

	pci_attach_hook(parent, self, pba);

	aprint_naive("\n");
	aprint_normal("\n");

	io_enabled = (pba->pba_flags & PCI_FLAGS_IO_ENABLED);
	mem_enabled = (pba->pba_flags & PCI_FLAGS_MEM_ENABLED);
	mrl_enabled = (pba->pba_flags & PCI_FLAGS_MRL_OKAY);
	mrm_enabled = (pba->pba_flags & PCI_FLAGS_MRM_OKAY);
	mwi_enabled = (pba->pba_flags & PCI_FLAGS_MWI_OKAY);

	if (io_enabled == 0 && mem_enabled == 0) {
		aprint_error("%s: no spaces enabled!\n", self->dv_xname);
		return;
	}

#define	PRINT(str)							\
do {									\
	aprint_normal("%s%s", sep, str);				\
	sep = ", ";							\
} while (/*CONSTCOND*/0)

	aprint_normal("%s: ", self->dv_xname);

	if (io_enabled)
		PRINT("i/o space");
	if (mem_enabled)
		PRINT("memory space");
	aprint_normal(" enabled");

	if (mrl_enabled || mrm_enabled || mwi_enabled) {
		if (mrl_enabled)
			PRINT("rd/line");
		if (mrm_enabled)
			PRINT("rd/mult");
		if (mwi_enabled)
			PRINT("wr/inv");
		aprint_normal(" ok");
	}

	aprint_normal("\n");

#undef PRINT

	sc->sc_iot = pba->pba_iot;
	sc->sc_memt = pba->pba_memt;
	sc->sc_dmat = pba->pba_dmat;
	sc->sc_dmat64 = pba->pba_dmat64;
	sc->sc_pc = pba->pba_pc;
	sc->sc_bus = pba->pba_bus;
	sc->sc_bridgetag = pba->pba_bridgetag;
	sc->sc_maxndevs = pci_bus_maxdevs(pba->pba_pc, pba->pba_bus);
	sc->sc_intrswiz = pba->pba_intrswiz;
	sc->sc_intrtag = pba->pba_intrtag;
	sc->sc_flags = pba->pba_flags;
	pci_enumerate_bus(sc, NULL, NULL);
}

int
pciprint(aux, pnp)
	void *aux;
	const char *pnp;
{
	struct pci_attach_args *pa = aux;
	char devinfo[256];
	const struct pci_quirkdata *qd;

	if (pnp) {
		pci_devinfo(pa->pa_id, pa->pa_class, 1, devinfo);
		aprint_normal("%s at %s", devinfo, pnp);
	}
	aprint_normal(" dev %d function %d", pa->pa_device, pa->pa_function);
	if (pci_config_dump) {
		printf(": ");
		pci_conf_print(pa->pa_pc, pa->pa_tag, NULL);
		if (!pnp)
			pci_devinfo(pa->pa_id, pa->pa_class, 1, devinfo);
		printf("%s at %s", devinfo, pnp ? pnp : "?");
		printf(" dev %d function %d (", pa->pa_device, pa->pa_function);
#ifdef __i386__
		printf("tag %#lx, intrtag %#lx, intrswiz %#lx, intrpin %#lx",
		    *(long *)&pa->pa_tag, *(long *)&pa->pa_intrtag,
		    (long)pa->pa_intrswiz, (long)pa->pa_intrpin);
#else
		printf("intrswiz %#lx, intrpin %#lx",
		    (long)pa->pa_intrswiz, (long)pa->pa_intrpin);
#endif
		printf(", i/o %s, mem %s,",
		    pa->pa_flags & PCI_FLAGS_IO_ENABLED ? "on" : "off",
		    pa->pa_flags & PCI_FLAGS_MEM_ENABLED ? "on" : "off");
		qd = pci_lookup_quirkdata(PCI_VENDOR(pa->pa_id),
		    PCI_PRODUCT(pa->pa_id));
		if (qd == NULL) {
			printf(" no quirks");
		} else {
			bitmask_snprintf(qd->quirks,
			    "\20\1multifn", devinfo, sizeof (devinfo));
			printf(" quirks %s", devinfo);
		}
		printf(")");
	}
	return (UNCONF);
}

int
pcisubmatch(parent, cf, aux)
	struct device *parent;
	struct cfdata *cf;
	void *aux;
{
	struct pci_attach_args *pa = aux;

	if (cf->pcicf_dev != PCI_UNK_DEV &&
	    cf->pcicf_dev != pa->pa_device)
		return (0);
	if (cf->pcicf_function != PCI_UNK_FUNCTION &&
	    cf->pcicf_function != pa->pa_function)
		return (0);
	return (config_match(parent, cf, aux));
}

int
pci_probe_device(struct pci_softc *sc, pcitag_t tag,
    int (*match)(struct pci_attach_args *), struct pci_attach_args *pap)
{
	pci_chipset_tag_t pc = sc->sc_pc;
	struct pci_attach_args pa;
	pcireg_t id, csr, class, intr, bhlcr;
	int ret, pin, bus, device, function;

	pci_decompose_tag(pc, tag, &bus, &device, &function);

	id = pci_conf_read(pc, tag, PCI_ID_REG);
	csr = pci_conf_read(pc, tag, PCI_COMMAND_STATUS_REG);
	class = pci_conf_read(pc, tag, PCI_CLASS_REG);
	intr = pci_conf_read(pc, tag, PCI_INTERRUPT_REG);
	bhlcr = pci_conf_read(pc, tag, PCI_BHLC_REG);

	/* Invalid vendor ID value? */
	if (PCI_VENDOR(id) == PCI_VENDOR_INVALID)
		return (0);
	/* XXX Not invalid, but we've done this ~forever. */
	if (PCI_VENDOR(id) == 0)
		return (0);

	pa.pa_iot = sc->sc_iot;
	pa.pa_memt = sc->sc_memt;
	pa.pa_dmat = sc->sc_dmat;
	pa.pa_dmat64 = sc->sc_dmat64;
	pa.pa_pc = pc;
	pa.pa_bus = bus;
	pa.pa_device = device;
	pa.pa_function = function;
	pa.pa_tag = tag;
	pa.pa_id = id;
	pa.pa_class = class;

	/*
	 * Set up memory, I/O enable, and PCI command flags
	 * as appropriate.
	 */
	pa.pa_flags = sc->sc_flags;
	if ((csr & PCI_COMMAND_IO_ENABLE) == 0)
		pa.pa_flags &= ~PCI_FLAGS_IO_ENABLED;
	if ((csr & PCI_COMMAND_MEM_ENABLE) == 0)
		pa.pa_flags &= ~PCI_FLAGS_MEM_ENABLED;

	/*
	 * If the cache line size is not configured, then
	 * clear the MRL/MRM/MWI command-ok flags.
	 */
	if (PCI_CACHELINE(bhlcr) == 0)
		pa.pa_flags &= ~(PCI_FLAGS_MRL_OKAY|
		    PCI_FLAGS_MRM_OKAY|PCI_FLAGS_MWI_OKAY);

	if (sc->sc_bridgetag == NULL) {
		pa.pa_intrswiz = 0;
		pa.pa_intrtag = tag;
	} else {
		pa.pa_intrswiz = sc->sc_intrswiz + device;
		pa.pa_intrtag = sc->sc_intrtag;
	}
	pin = PCI_INTERRUPT_PIN(intr);
	pa.pa_rawintrpin = pin;
	if (pin == PCI_INTERRUPT_PIN_NONE) {
		/* no interrupt */
		pa.pa_intrpin = 0;
	} else {
		/*
		 * swizzle it based on the number of busses we're
		 * behind and our device number.
		 */
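		/*
		 * Worked example: on the root bus (pa_intrswiz == 0) the
		 * pin passes through unchanged, since ((pin + 0 - 1) % 4)
		 * + 1 == pin.  A device at device number 2 behind one
		 * bridge (pa_intrswiz == 2) asserting INTA# (pin == 1)
		 * yields ((1 + 2 - 1) % 4) + 1 == 3, i.e. INTC# as seen
		 * at the parent bus.
		 */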
		pa.pa_intrpin = 	/* XXX */
		    ((pin + pa.pa_intrswiz - 1) % 4) + 1;
	}
	pa.pa_intrline = PCI_INTERRUPT_LINE(intr);

	if (match != NULL) {
		ret = (*match)(&pa);
		if (ret != 0 && pap != NULL)
			*pap = pa;
	} else {
		ret = config_found_sm(&sc->sc_dev, &pa, pciprint,
		    pcisubmatch) != NULL;
	}

	return (ret);
}

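/*
 * Illustrative sketch ("pc" and "tag" stand for a caller's own chipset tag
 * and device tag): a driver that needs to locate a capability in config
 * space would call pci_get_capability() like this:
 *
 *	int off;
 *	pcireg_t reg;
 *
 *	if (pci_get_capability(pc, tag, PCI_CAP_PWRMGMT, &off, &reg)) {
 *		... "off" is the capability's offset in config space and
 *		    "reg" holds the first 32 bits of the capability ...
 *	}
 */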
int
pci_get_capability(pc, tag, capid, offset, value)
	pci_chipset_tag_t pc;
	pcitag_t tag;
	int capid;
	int *offset;
	pcireg_t *value;
{
	pcireg_t reg;
	unsigned int ofs;

	reg = pci_conf_read(pc, tag, PCI_COMMAND_STATUS_REG);
	if (!(reg & PCI_STATUS_CAPLIST_SUPPORT))
		return (0);

	/* Determine the Capability List Pointer register to start with. */
	reg = pci_conf_read(pc, tag, PCI_BHLC_REG);
	switch (PCI_HDRTYPE_TYPE(reg)) {
	case 0:	/* standard device header */
		ofs = PCI_CAPLISTPTR_REG;
		break;
	case 2:	/* PCI-CardBus Bridge header */
		ofs = PCI_CARDBUS_CAPLISTPTR_REG;
		break;
	default:
		return (0);
	}

	ofs = PCI_CAPLIST_PTR(pci_conf_read(pc, tag, ofs));
	while (ofs != 0) {
#ifdef DIAGNOSTIC
		if ((ofs & 3) || (ofs < 0x40))
			panic("pci_get_capability");
#endif
		reg = pci_conf_read(pc, tag, ofs);
		if (PCI_CAPLIST_CAP(reg) == capid) {
			if (offset)
				*offset = ofs;
			if (value)
				*value = reg;
			return (1);
		}
		ofs = PCI_CAPLIST_NEXT(reg);
	}

	return (0);
}

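/*
 * Illustrative sketch only (the vendor/product names below are
 * hypothetical): a caller can locate the first instance of a particular
 * device by supplying a match function:
 *
 *	static int
 *	mydev_match(struct pci_attach_args *pa)
 *	{
 *		return (PCI_VENDOR(pa->pa_id) == PCI_VENDOR_FOO &&
 *		    PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_FOO_BAR);
 *	}
 *
 *	struct pci_attach_args pa;
 *
 *	if (pci_find_device(&pa, mydev_match))
 *		... "pa" now describes the matching device ...
 */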
int
pci_find_device(struct pci_attach_args *pa,
		int (*match)(struct pci_attach_args *))
{
	extern struct cfdriver pci_cd;
	struct device *pcidev;
	int i;

	for (i = 0; i < pci_cd.cd_ndevs; i++) {
		pcidev = pci_cd.cd_devs[i];
		if (pcidev != NULL &&
		    pci_enumerate_bus((struct pci_softc *) pcidev,
		        match, pa) != 0)
			return (1);
	}
	return (0);
}

/*
 * Generic PCI bus enumeration routine.  Used unless machine-dependent
 * code needs to provide something else.
 */
int
pci_enumerate_bus_generic(struct pci_softc *sc,
    int (*match)(struct pci_attach_args *), struct pci_attach_args *pap)
{
	pci_chipset_tag_t pc = sc->sc_pc;
	int device, function, nfunctions, ret;
	const struct pci_quirkdata *qd;
	pcireg_t id, bhlcr;
	pcitag_t tag;
#ifdef __PCI_BUS_DEVORDER
	char devs[32];
	int i;
#endif

#ifdef __PCI_BUS_DEVORDER
	pci_bus_devorder(sc->sc_pc, sc->sc_bus, devs);
	for (i = 0; (device = devs[i]) < 32 && device >= 0; i++)
#else
	for (device = 0; device < sc->sc_maxndevs; device++)
#endif
	{
		tag = pci_make_tag(pc, sc->sc_bus, device, 0);
		id = pci_conf_read(pc, tag, PCI_ID_REG);

		/* Invalid vendor ID value? */
		if (PCI_VENDOR(id) == PCI_VENDOR_INVALID)
			continue;
		/* XXX Not invalid, but we've done this ~forever. */
		if (PCI_VENDOR(id) == 0)
			continue;

		qd = pci_lookup_quirkdata(PCI_VENDOR(id), PCI_PRODUCT(id));

		bhlcr = pci_conf_read(pc, tag, PCI_BHLC_REG);
		if (PCI_HDRTYPE_MULTIFN(bhlcr) ||
		    (qd != NULL &&
		      (qd->quirks & PCI_QUIRK_MULTIFUNCTION) != 0))
			nfunctions = 8;
		else
			nfunctions = 1;

		for (function = 0; function < nfunctions; function++) {
			tag = pci_make_tag(pc, sc->sc_bus, device, function);
			ret = pci_probe_device(sc, tag, match, pap);
			if (match != NULL && ret != 0)
				return (ret);
		}
	}
	return (0);
}

/*
 * Power Management Capability (Rev 2.2)
 */
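/*
 * Illustrative sketch: a driver would typically drop its device to D3 on
 * suspend and bring it back to D0 on resume (error handling omitted):
 *
 *	(void) pci_set_powerstate(pc, tag, PCI_PWR_D3);
 *	...
 *	(void) pci_set_powerstate(pc, tag, PCI_PWR_D0);
 *
 * pci_set_powerstate() returns EOPNOTSUPP if the device lacks the power
 * management capability (or the requested state), and EINVAL for
 * transitions the capability does not allow.
 */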

int
pci_set_powerstate(pci_chipset_tag_t pc, pcitag_t tag, int newstate)
{
	int offset;
	pcireg_t value, cap, now;

	if (!pci_get_capability(pc, tag, PCI_CAP_PWRMGMT, &offset, &value))
		return (EOPNOTSUPP);

	cap = value >> 16;
	value = pci_conf_read(pc, tag, offset + PCI_PMCSR);
	now = value & PCI_PMCSR_STATE_MASK;
	value &= ~PCI_PMCSR_STATE_MASK;
	switch (newstate) {
	case PCI_PWR_D0:
		if (now == PCI_PMCSR_STATE_D0)
			return (0);
		value |= PCI_PMCSR_STATE_D0;
		break;
	case PCI_PWR_D1:
		if (now == PCI_PMCSR_STATE_D1)
			return (0);
		if (now == PCI_PMCSR_STATE_D2 || now == PCI_PMCSR_STATE_D3)
			return (EINVAL);
		if (!(cap & PCI_PMCR_D1SUPP))
			return (EOPNOTSUPP);
		value |= PCI_PMCSR_STATE_D1;
		break;
	case PCI_PWR_D2:
		if (now == PCI_PMCSR_STATE_D2)
			return (0);
		if (now == PCI_PMCSR_STATE_D3)
			return (EINVAL);
		if (!(cap & PCI_PMCR_D2SUPP))
			return (EOPNOTSUPP);
		value |= PCI_PMCSR_STATE_D2;
		break;
	case PCI_PWR_D3:
		if (now == PCI_PMCSR_STATE_D3)
			return (0);
		value |= PCI_PMCSR_STATE_D3;
		break;
	default:
		return (EINVAL);
	}
	pci_conf_write(pc, tag, offset + PCI_PMCSR, value);
	DELAY(1000);

	return (0);
}

int
pci_get_powerstate(pci_chipset_tag_t pc, pcitag_t tag)
{
	int offset;
	pcireg_t value;

	if (!pci_get_capability(pc, tag, PCI_CAP_PWRMGMT, &offset, &value))
		return (PCI_PWR_D0);
	value = pci_conf_read(pc, tag, offset + PCI_PMCSR);
	value &= PCI_PMCSR_STATE_MASK;
	switch (value) {
	case PCI_PMCSR_STATE_D0:
		return (PCI_PWR_D0);
	case PCI_PMCSR_STATE_D1:
		return (PCI_PWR_D1);
	case PCI_PMCSR_STATE_D2:
		return (PCI_PWR_D2);
	case PCI_PMCSR_STATE_D3:
		return (PCI_PWR_D3);
	}

	return (PCI_PWR_D0);
}

/*
 * Vital Product Data (PCI 2.2)
 */
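/*
 * Illustrative sketch: reading the first four 32-bit words of a device's
 * VPD area (both routines return 0 on success, nonzero on failure):
 *
 *	pcireg_t vpd[4];
 *
 *	if (pci_vpd_read(pc, tag, 0, 4, vpd) == 0)
 *		... vpd[0..3] now hold the data ...
 */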

int
pci_vpd_read(pci_chipset_tag_t pc, pcitag_t tag, int offset, int count,
    pcireg_t *data)
{
	uint32_t reg;
	int ofs, i, j;

	KASSERT(data != NULL);
	KASSERT((offset + count) < 0x7fff);

	if (pci_get_capability(pc, tag, PCI_CAP_VPD, &ofs, &reg) == 0)
		return (1);

	for (i = 0; i < count; offset += sizeof(*data), i++) {
		reg &= 0x0000ffff;
		reg &= ~PCI_VPD_OPFLAG;
		reg |= PCI_VPD_ADDRESS(offset);
		pci_conf_write(pc, tag, ofs, reg);

		/*
		 * PCI 2.2 does not specify how long we should poll
		 * for completion nor whether the operation can fail.
		 */
		j = 0;
		do {
			if (j++ == 20)
				return (1);
			delay(4);
			reg = pci_conf_read(pc, tag, ofs);
		} while ((reg & PCI_VPD_OPFLAG) == 0);
		data[i] = pci_conf_read(pc, tag, PCI_VPD_DATAREG(ofs));
	}

	return (0);
}

int
pci_vpd_write(pci_chipset_tag_t pc, pcitag_t tag, int offset, int count,
    pcireg_t *data)
{
	pcireg_t reg;
	int ofs, i, j;

	KASSERT(data != NULL);
	KASSERT((offset + count) < 0x7fff);

	if (pci_get_capability(pc, tag, PCI_CAP_VPD, &ofs, &reg) == 0)
		return (1);

	for (i = 0; i < count; offset += sizeof(*data), i++) {
		pci_conf_write(pc, tag, PCI_VPD_DATAREG(ofs), data[i]);

		reg &= 0x0000ffff;
		reg |= PCI_VPD_OPFLAG;
		reg |= PCI_VPD_ADDRESS(offset);
		pci_conf_write(pc, tag, ofs, reg);

		/*
		 * PCI 2.2 does not specify how long we should poll
		 * for completion nor whether the operation can fail.
		 */
		j = 0;
		do {
			if (j++ == 20)
				return (1);
			delay(1);
			reg = pci_conf_read(pc, tag, ofs);
		} while (reg & PCI_VPD_OPFLAG);
	}

	return (0);
}

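/*
 * Illustrative sketch: a driver that supports 64-bit DMA would pick its
 * DMA tag at attach time roughly like this:
 *
 *	if (pci_dma64_available(pa))
 *		sc->sc_dmat = pa->pa_dmat64;
 *	else
 *		sc->sc_dmat = pa->pa_dmat;
 */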
int
pci_dma64_available(struct pci_attach_args *pa)
{
#ifdef _PCI_HAVE_DMA64
	if (BUS_DMA_TAG_VALID(pa->pa_dmat64) &&
	    ((uint64_t)physmem << PAGE_SHIFT) > 0xffffffffULL)
		return (1);
#endif
	return (0);
}
629