/*	$NetBSD: pci.h,v 1.17 2015/04/06 02:29:18 riastradh Exp $	*/

/*-
 * Copyright (c) 2013 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Taylor R. Campbell.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef _LINUX_PCI_H_
#define _LINUX_PCI_H_

#ifdef _KERNEL_OPT
#if defined(i386) || defined(amd64)
#include "acpica.h"
#else	/* !(i386 || amd64) */
#define NACPICA	0
#endif	/* i386 || amd64 */
#endif

#include <sys/types.h>
#include <sys/param.h>
#include <sys/bus.h>
#include <sys/cdefs.h>
#include <sys/kmem.h>
#include <sys/systm.h>

#include <machine/limits.h>

#include <dev/pci/pcidevs.h>
#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/agpvar.h>

#include <dev/acpi/acpivar.h>
#include <dev/acpi/acpi_pci.h>

#include <linux/dma-mapping.h>
#include <linux/ioport.h>
#include <linux/kernel.h>

struct pci_bus {
	u_int		number;
};

struct pci_device_id {
	uint32_t	vendor;
	uint32_t	device;
	uint32_t	subvendor;
	uint32_t	subdevice;
	uint32_t	class;
	uint32_t	class_mask;
	unsigned long	driver_data;
};

#define	PCI_ANY_ID		((pcireg_t)-1)

#define	PCI_BASE_CLASS_DISPLAY	PCI_CLASS_DISPLAY

#define	PCI_CLASS_DISPLAY_VGA						\
	((PCI_CLASS_DISPLAY << 8) | PCI_SUBCLASS_DISPLAY_VGA)
#define	PCI_CLASS_BRIDGE_ISA						\
	((PCI_CLASS_BRIDGE << 8) | PCI_SUBCLASS_BRIDGE_ISA)
CTASSERT(PCI_CLASS_BRIDGE_ISA == 0x0601);

/* XXX This is getting silly...  */
#define	PCI_VENDOR_ID_ASUSTEK	PCI_VENDOR_ASUSTEK
#define	PCI_VENDOR_ID_ATI	PCI_VENDOR_ATI
#define	PCI_VENDOR_ID_DELL	PCI_VENDOR_DELL
#define	PCI_VENDOR_ID_IBM	PCI_VENDOR_IBM
#define	PCI_VENDOR_ID_HP	PCI_VENDOR_HP
#define	PCI_VENDOR_ID_INTEL	PCI_VENDOR_INTEL
#define	PCI_VENDOR_ID_NVIDIA	PCI_VENDOR_NVIDIA
#define	PCI_VENDOR_ID_SONY	PCI_VENDOR_SONY
#define	PCI_VENDOR_ID_VIA	PCI_VENDOR_VIATECH

#define	PCI_DEVICE_ID_ATI_RADEON_QY	PCI_PRODUCT_ATI_RADEON_RV100_QY

#define	PCI_DEVFN(DEV, FN)						\
	(__SHIFTIN((DEV), __BITS(3, 7)) | __SHIFTIN((FN), __BITS(0, 2)))
#define	PCI_SLOT(DEVFN)		__SHIFTOUT((DEVFN), __BITS(3, 7))
#define	PCI_FUNC(DEVFN)		__SHIFTOUT((DEVFN), __BITS(0, 2))
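
/*
 * Worked example of the devfn encoding, for reference: PCI_DEVFN(0x1f, 2)
 * encodes device 0x1f, function 2 as 0xfa, and PCI_SLOT(0xfa) == 0x1f,
 * PCI_FUNC(0xfa) == 2 recover the two fields.
 */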

#define	PCI_NUM_RESOURCES	((PCI_MAPREG_END - PCI_MAPREG_START) / 4)
#define	DEVICE_COUNT_RESOURCE	PCI_NUM_RESOURCES

#define	PCI_CAP_ID_AGP	PCI_CAP_AGP

typedef int pci_power_t;

#define	PCI_D0		0
#define	PCI_D1		1
#define	PCI_D2		2
#define	PCI_D3hot	3
#define	PCI_D3cold	4

#define	__pci_iomem

struct pci_dev {
	struct pci_attach_args	pd_pa;
	int			pd_kludges;	/* Gotta lose 'em...  */
#define	NBPCI_KLUDGE_GET_MUMBLE	0x01
#define	NBPCI_KLUDGE_MAP_ROM	0x02
	bus_space_tag_t		pd_rom_bst;
	bus_space_handle_t	pd_rom_bsh;
	bus_size_t		pd_rom_size;
	void			*pd_rom_vaddr;
	device_t		pd_dev;
	struct drm_device	*pd_drm_dev; /* XXX Nouveau kludge!  */
	struct {
		pcireg_t		type;
		bus_addr_t		addr;
		bus_size_t		size;
		int			flags;
		bus_space_tag_t		bst;
		bus_space_handle_t	bsh;
		void __pci_iomem	*kva;
	}			pd_resources[PCI_NUM_RESOURCES];
	struct pci_conf_state	*pd_saved_state;
	struct acpi_devnode	*pd_ad;
	struct device		dev;		/* XXX Don't believe me!  */
	struct pci_bus		*bus;
	uint32_t		devfn;
	uint16_t		vendor;
	uint16_t		device;
	uint16_t		subsystem_vendor;
	uint16_t		subsystem_device;
	uint8_t			revision;
	uint32_t		class;
	bool			msi_enabled;
};

static inline device_t
pci_dev_dev(struct pci_dev *pdev)
{
	return pdev->pd_dev;
}

/* XXX Nouveau kludge!  Don't believe me!  */
static inline struct pci_dev *
to_pci_dev(struct device *dev)
{

	return container_of(dev, struct pci_dev, dev);
}

/* XXX Nouveau kludge!  */
static inline struct drm_device *
pci_get_drvdata(struct pci_dev *pdev)
{
	return pdev->pd_drm_dev;
}

static inline void
linux_pci_dev_init(struct pci_dev *pdev, device_t dev,
    const struct pci_attach_args *pa, int kludges)
{
	const uint32_t subsystem_id = pci_conf_read(pa->pa_pc, pa->pa_tag,
	    PCI_SUBSYS_ID_REG);
	unsigned i;

	pdev->pd_pa = *pa;
	pdev->pd_kludges = kludges;
	pdev->pd_rom_vaddr = NULL;
	pdev->pd_dev = dev;
#if (NACPICA > 0)
	pdev->pd_ad = acpi_pcidev_find(0 /*XXX segment*/, pa->pa_bus,
	    pa->pa_device, pa->pa_function);
#else
	pdev->pd_ad = NULL;
#endif
	pdev->bus = kmem_zalloc(sizeof(struct pci_bus), KM_SLEEP);
	pdev->bus->number = pa->pa_bus;
	pdev->devfn = PCI_DEVFN(pa->pa_device, pa->pa_function);
	pdev->vendor = PCI_VENDOR(pa->pa_id);
	pdev->device = PCI_PRODUCT(pa->pa_id);
	pdev->subsystem_vendor = PCI_SUBSYS_VENDOR(subsystem_id);
	pdev->subsystem_device = PCI_SUBSYS_ID(subsystem_id);
	pdev->revision = PCI_REVISION(pa->pa_class);
	pdev->class = __SHIFTOUT(pa->pa_class, 0xffffff00UL); /* ? */

	CTASSERT(__arraycount(pdev->pd_resources) == PCI_NUM_RESOURCES);
	for (i = 0; i < PCI_NUM_RESOURCES; i++) {
		const int reg = PCI_BAR(i);

		pdev->pd_resources[i].type = pci_mapreg_type(pa->pa_pc,
		    pa->pa_tag, reg);
		if (pci_mapreg_info(pa->pa_pc, pa->pa_tag, reg,
			pdev->pd_resources[i].type,
			&pdev->pd_resources[i].addr,
			&pdev->pd_resources[i].size,
			&pdev->pd_resources[i].flags)) {
			pdev->pd_resources[i].addr = 0;
			pdev->pd_resources[i].size = 0;
			pdev->pd_resources[i].flags = 0;
		}
		pdev->pd_resources[i].kva = NULL;
	}
}
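
/*
 * Rough usage sketch (example_softc, sc_pdev, and example_attach are
 * hypothetical names, not part of this header): a NetBSD attach function
 * for a Linux-derived driver initializes its embedded struct pci_dev from
 * the pci_attach_args it was handed, e.g.
 *
 *	struct example_softc {
 *		device_t	sc_dev;
 *		struct pci_dev	sc_pdev;
 *	};
 *
 *	static void
 *	example_attach(device_t parent, device_t self, void *aux)
 *	{
 *		struct example_softc *const sc = device_private(self);
 *		const struct pci_attach_args *const pa = aux;
 *
 *		sc->sc_dev = self;
 *		linux_pci_dev_init(&sc->sc_pdev, self, pa, 0);
 *		...
 *	}
 */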

static inline int
pci_find_capability(struct pci_dev *pdev, int cap)
{
	return pci_get_capability(pdev->pd_pa.pa_pc, pdev->pd_pa.pa_tag, cap,
	    NULL, NULL);
}

static inline int
pci_read_config_dword(struct pci_dev *pdev, int reg, uint32_t *valuep)
{
	KASSERT(!ISSET(reg, 3));
	*valuep = pci_conf_read(pdev->pd_pa.pa_pc, pdev->pd_pa.pa_tag, reg);
	return 0;
}

static inline int
pci_read_config_word(struct pci_dev *pdev, int reg, uint16_t *valuep)
{
	KASSERT(!ISSET(reg, 1));
	*valuep = pci_conf_read(pdev->pd_pa.pa_pc, pdev->pd_pa.pa_tag,
	    (reg &~ 2)) >> (8 * (reg & 2));
	return 0;
}

static inline int
pci_read_config_byte(struct pci_dev *pdev, int reg, uint8_t *valuep)
{
	*valuep = pci_conf_read(pdev->pd_pa.pa_pc, pdev->pd_pa.pa_tag,
	    (reg &~ 3)) >> (8 * (reg & 3));
	return 0;
}

static inline int
pci_write_config_dword(struct pci_dev *pdev, int reg, uint32_t value)
{
	KASSERT(!ISSET(reg, 3));
	pci_conf_write(pdev->pd_pa.pa_pc, pdev->pd_pa.pa_tag, reg, value);
	return 0;
}

static inline void
pci_rmw_config(struct pci_dev *pdev, int reg, unsigned int bytes,
    uint32_t value)
{
	const uint32_t mask = ~((~0UL) << (8 * bytes));
	const int reg32 = (reg &~ 3);
	const unsigned int shift = (8 * (reg & 3));
	uint32_t value32;

	KASSERT(bytes <= 4);
	KASSERT(!ISSET(value, ~mask));
	pci_read_config_dword(pdev, reg32, &value32);
	value32 &=~ (mask << shift);
	value32 |= (value << shift);
	pci_write_config_dword(pdev, reg32, value32);
}
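
/*
 * Worked example of the read-modify-write above: a 16-bit write at
 * reg 0x52 runs with bytes = 2, so mask = 0xffff, reg32 = 0x50, and
 * shift = 16; the dword at 0x50 is read, its top 16 bits (offsets
 * 0x52-0x53) are replaced with the new value, and the dword is
 * written back.
 */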

static inline int
pci_write_config_word(struct pci_dev *pdev, int reg, uint16_t value)
{
	KASSERT(!ISSET(reg, 1));
	pci_rmw_config(pdev, reg, 2, value);
	return 0;
}

static inline int
pci_write_config_byte(struct pci_dev *pdev, int reg, uint8_t value)
{
	pci_rmw_config(pdev, reg, 1, value);
	return 0;
}

/*
 * XXX pci msi
 */
static inline int
pci_enable_msi(struct pci_dev *pdev)
{
	return -ENOSYS;
}

static inline void
pci_disable_msi(struct pci_dev *pdev __unused)
{
	KASSERT(pdev->msi_enabled);
}

static inline void
pci_set_master(struct pci_dev *pdev)
{
	pcireg_t csr;

	csr = pci_conf_read(pdev->pd_pa.pa_pc, pdev->pd_pa.pa_tag,
	    PCI_COMMAND_STATUS_REG);
	csr |= PCI_COMMAND_MASTER_ENABLE;
	pci_conf_write(pdev->pd_pa.pa_pc, pdev->pd_pa.pa_tag,
	    PCI_COMMAND_STATUS_REG, csr);
}

static inline void
pci_clear_master(struct pci_dev *pdev)
{
	pcireg_t csr;

	csr = pci_conf_read(pdev->pd_pa.pa_pc, pdev->pd_pa.pa_tag,
	    PCI_COMMAND_STATUS_REG);
	csr &= ~(pcireg_t)PCI_COMMAND_MASTER_ENABLE;
	pci_conf_write(pdev->pd_pa.pa_pc, pdev->pd_pa.pa_tag,
	    PCI_COMMAND_STATUS_REG, csr);
}

#define	PCIBIOS_MIN_MEM	0x100000	/* XXX bogus x86 kludge bollocks */

static inline bus_addr_t
pcibios_align_resource(void *p, const struct resource *resource,
    bus_addr_t addr, bus_size_t size)
{
	panic("pcibios_align_resource has accessed unaligned neurons!");
}

static inline int
pci_bus_alloc_resource(struct pci_bus *bus, struct resource *resource,
    bus_size_t size, bus_size_t align, bus_addr_t start, int type __unused,
    bus_addr_t (*align_fn)(void *, const struct resource *, bus_addr_t,
	bus_size_t) __unused,
    struct pci_dev *pdev)
{
	const struct pci_attach_args *const pa = &pdev->pd_pa;
	bus_space_tag_t bst;
	int error;

	switch (resource->flags) {
	case IORESOURCE_MEM:
		bst = pa->pa_memt;
		break;

	case IORESOURCE_IO:
		bst = pa->pa_iot;
		break;

	default:
		panic("I don't know what kind of resource you want!");
	}

	resource->r_bst = bst;
	error = bus_space_alloc(bst, start, __type_max(bus_addr_t),
	    size, align, 0, 0, &resource->start, &resource->r_bsh);
	if (error)
		return error;

	resource->size = size;
	return 0;
}

/*
 * XXX Mega-kludgerific!  pci_get_bus_and_slot and pci_get_class are
 * defined only for their single purposes in i915drm, in
 * i915_get_bridge_dev and intel_detect_pch.  We can't define them more
 * generally without adapting pci_find_device (and pci_enumerate_bus
 * internally) to pass a cookie through.
 */
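
/*
 * The call patterns these stubs are meant to serve look roughly like the
 * following (paraphrased from i915, not a verbatim transcription):
 *
 *	bridge = pci_get_bus_and_slot(0, PCI_DEVFN(0, 0));
 *	...
 *	while ((pch = pci_get_class(PCI_CLASS_BRIDGE_ISA << 8, pch)) != NULL) {
 *		... inspect pch->vendor/pch->device ...
 *	}
 *
 * hence the hard-coded KASSERTs on the arguments below.
 */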

static inline int		/* XXX inline?  */
pci_kludgey_match_bus0_dev0_func0(const struct pci_attach_args *pa)
{

	if (pa->pa_bus != 0)
		return 0;
	if (pa->pa_device != 0)
		return 0;
	if (pa->pa_function != 0)
		return 0;

	return 1;
}

static inline struct pci_dev *
pci_get_bus_and_slot(int bus, int slot)
{
	struct pci_attach_args pa;

	KASSERT(bus == 0);
	KASSERT(slot == PCI_DEVFN(0, 0));

	if (!pci_find_device(&pa, &pci_kludgey_match_bus0_dev0_func0))
		return NULL;

	struct pci_dev *const pdev = kmem_zalloc(sizeof(*pdev), KM_SLEEP);
	linux_pci_dev_init(pdev, NULL, &pa, NBPCI_KLUDGE_GET_MUMBLE);

	return pdev;
}

static inline int		/* XXX inline?  */
pci_kludgey_match_isa_bridge(const struct pci_attach_args *pa)
{

	if (PCI_CLASS(pa->pa_class) != PCI_CLASS_BRIDGE)
		return 0;
	if (PCI_SUBCLASS(pa->pa_class) != PCI_SUBCLASS_BRIDGE_ISA)
		return 0;

	return 1;
}

static inline void
pci_dev_put(struct pci_dev *pdev)
{

	if (pdev == NULL)
		return;

	KASSERT(ISSET(pdev->pd_kludges, NBPCI_KLUDGE_GET_MUMBLE));
	kmem_free(pdev, sizeof(*pdev));
}

static inline struct pci_dev *
pci_get_class(uint32_t class_subclass_shifted __unused, struct pci_dev *from)
{
	struct pci_attach_args pa;

	KASSERT(class_subclass_shifted == (PCI_CLASS_BRIDGE_ISA << 8));

	if (from != NULL) {
		pci_dev_put(from);
		return NULL;
	}

	if (!pci_find_device(&pa, &pci_kludgey_match_isa_bridge))
		return NULL;

	struct pci_dev *const pdev = kmem_zalloc(sizeof(*pdev), KM_SLEEP);
	linux_pci_dev_init(pdev, NULL, &pa, NBPCI_KLUDGE_GET_MUMBLE);

	return pdev;
}

#define	__pci_rom_iomem

static inline void
pci_unmap_rom(struct pci_dev *pdev, void __pci_rom_iomem *vaddr __unused)
{

	/* XXX Disable the ROM address decoder.  */
	KASSERT(ISSET(pdev->pd_kludges, NBPCI_KLUDGE_MAP_ROM));
	KASSERT(vaddr == pdev->pd_rom_vaddr);
	bus_space_unmap(pdev->pd_rom_bst, pdev->pd_rom_bsh, pdev->pd_rom_size);
	pdev->pd_kludges &= ~NBPCI_KLUDGE_MAP_ROM;
	pdev->pd_rom_vaddr = NULL;
}

/* XXX Whattakludge!  Should move this in sys/arch/.  */
static int
pci_map_rom_md(struct pci_dev *pdev)
{
#if defined(__i386__) || defined(__x86_64__) || defined(__ia64__)
	const bus_addr_t rom_base = 0xc0000;
	const bus_size_t rom_size = 0x20000;
	bus_space_handle_t rom_bsh;
	int error;

	if (PCI_CLASS(pdev->pd_pa.pa_class) != PCI_CLASS_DISPLAY)
		return ENXIO;
	if (PCI_SUBCLASS(pdev->pd_pa.pa_class) != PCI_SUBCLASS_DISPLAY_VGA)
		return ENXIO;
	/* XXX Check whether this is the primary VGA card?  */
	error = bus_space_map(pdev->pd_pa.pa_memt, rom_base, rom_size,
	    (BUS_SPACE_MAP_LINEAR | BUS_SPACE_MAP_PREFETCHABLE), &rom_bsh);
	if (error)
		return ENXIO;

	pdev->pd_rom_bst = pdev->pd_pa.pa_memt;
	pdev->pd_rom_bsh = rom_bsh;
	pdev->pd_rom_size = rom_size;

	return 0;
#else
	return ENXIO;
#endif
}

static inline void __pci_rom_iomem *
pci_map_rom(struct pci_dev *pdev, size_t *sizep)
{
	bus_space_handle_t bsh;
	bus_size_t size;

	KASSERT(!ISSET(pdev->pd_kludges, NBPCI_KLUDGE_MAP_ROM));

	if (pci_mapreg_map(&pdev->pd_pa, PCI_MAPREG_ROM, PCI_MAPREG_TYPE_ROM,
		(BUS_SPACE_MAP_PREFETCHABLE | BUS_SPACE_MAP_LINEAR),
		&pdev->pd_rom_bst, &pdev->pd_rom_bsh, NULL, &pdev->pd_rom_size)
	    != 0 &&
	    pci_map_rom_md(pdev) != 0)
		return NULL;
	pdev->pd_kludges |= NBPCI_KLUDGE_MAP_ROM;

	/* XXX This type is obviously wrong in general...  */
	if (pci_find_rom(&pdev->pd_pa, pdev->pd_rom_bst, pdev->pd_rom_bsh,
		pdev->pd_rom_size, PCI_ROM_CODE_TYPE_X86, &bsh, &size)) {
		pci_unmap_rom(pdev, NULL);
		return NULL;
	}

	KASSERT(size <= SIZE_T_MAX);
	*sizep = size;
	pdev->pd_rom_vaddr = bus_space_vaddr(pdev->pd_rom_bst, bsh);
	return pdev->pd_rom_vaddr;
}
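
/*
 * Rough usage sketch (rom, romsz, copy, and copysz are hypothetical
 * names): callers map the ROM, copy what they need, and unmap it again.
 *
 *	void __pci_rom_iomem *rom;
 *	size_t romsz;
 *
 *	rom = pci_map_rom(pdev, &romsz);
 *	if (rom == NULL)
 *		return ENXIO;
 *	memcpy(copy, rom, MIN(copysz, romsz));
 *	pci_unmap_rom(pdev, rom);
 */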

static inline void __pci_rom_iomem *
pci_platform_rom(struct pci_dev *pdev __unused, size_t *sizep)
{

	*sizep = 0;
	return NULL;
}

static inline int
pci_enable_rom(struct pci_dev *pdev)
{
	const pci_chipset_tag_t pc = pdev->pd_pa.pa_pc;
	const pcitag_t tag = pdev->pd_pa.pa_tag;
	pcireg_t addr;
	int s;

	/* XXX Don't do anything if the ROM isn't there.  */

	s = splhigh();
	addr = pci_conf_read(pc, tag, PCI_MAPREG_ROM);
	addr |= PCI_MAPREG_ROM_ENABLE;
	pci_conf_write(pc, tag, PCI_MAPREG_ROM, addr);
	splx(s);

	return 0;
}

static inline void
pci_disable_rom(struct pci_dev *pdev)
{
	const pci_chipset_tag_t pc = pdev->pd_pa.pa_pc;
	const pcitag_t tag = pdev->pd_pa.pa_tag;
	pcireg_t addr;
	int s;

	s = splhigh();
	addr = pci_conf_read(pc, tag, PCI_MAPREG_ROM);
	addr &= ~(pcireg_t)PCI_MAPREG_ROM_ENABLE;
	pci_conf_write(pc, tag, PCI_MAPREG_ROM, addr);
	splx(s);
}

static inline bus_addr_t
pci_resource_start(struct pci_dev *pdev, unsigned i)
{

	KASSERT(i < PCI_NUM_RESOURCES);
	return pdev->pd_resources[i].addr;
}

static inline bus_size_t
pci_resource_len(struct pci_dev *pdev, unsigned i)
{

	KASSERT(i < PCI_NUM_RESOURCES);
	return pdev->pd_resources[i].size;
}

static inline bus_addr_t
pci_resource_end(struct pci_dev *pdev, unsigned i)
{

	return pci_resource_start(pdev, i) + (pci_resource_len(pdev, i) - 1);
}

static inline int
pci_resource_flags(struct pci_dev *pdev, unsigned i)
{

	KASSERT(i < PCI_NUM_RESOURCES);
	return pdev->pd_resources[i].flags;
}
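
/*
 * These mirror Linux's BAR accessors; the index is the BAR number.  For
 * example, a driver whose registers live in BAR 2 would do something
 * like the following (rmmio_base and rmmio_size are hypothetical names):
 *
 *	rmmio_base = pci_resource_start(pdev, 2);
 *	rmmio_size = pci_resource_len(pdev, 2);
 */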

static inline void __pci_iomem *
pci_iomap(struct pci_dev *pdev, unsigned i, bus_size_t size)
{
	int error;

	KASSERT(i < PCI_NUM_RESOURCES);
	KASSERT(pdev->pd_resources[i].kva == NULL);

	if (PCI_MAPREG_TYPE(pdev->pd_resources[i].type) != PCI_MAPREG_TYPE_MEM)
		return NULL;
	if (pdev->pd_resources[i].size < size)
		return NULL;
	error = bus_space_map(pdev->pd_pa.pa_memt, pdev->pd_resources[i].addr,
	    size, BUS_SPACE_MAP_LINEAR | pdev->pd_resources[i].flags,
	    &pdev->pd_resources[i].bsh);
	if (error) {
		/* Horrible hack: try asking the fake AGP device.  */
		if (!agp_i810_borrow(pdev->pd_resources[i].addr, size,
			&pdev->pd_resources[i].bsh))
			return NULL;
	}
	pdev->pd_resources[i].bst = pdev->pd_pa.pa_memt;
	pdev->pd_resources[i].kva = bus_space_vaddr(pdev->pd_resources[i].bst,
	    pdev->pd_resources[i].bsh);

	return pdev->pd_resources[i].kva;
}

static inline void
pci_iounmap(struct pci_dev *pdev, void __pci_iomem *kva)
{
	unsigned i;

	CTASSERT(__arraycount(pdev->pd_resources) == PCI_NUM_RESOURCES);
	for (i = 0; i < PCI_NUM_RESOURCES; i++) {
		if (pdev->pd_resources[i].kva == kva)
			break;
	}
	KASSERT(i < PCI_NUM_RESOURCES);

	pdev->pd_resources[i].kva = NULL;
	bus_space_unmap(pdev->pd_resources[i].bst, pdev->pd_resources[i].bsh,
	    pdev->pd_resources[i].size);
}
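
/*
 * Rough usage sketch for the pair above (regs is a hypothetical name):
 * map a BAR, access it through the returned kernel virtual address (or
 * the underlying bus space handle), then unmap by that same address.
 *
 *	void __pci_iomem *regs;
 *
 *	regs = pci_iomap(pdev, 0, pci_resource_len(pdev, 0));
 *	if (regs == NULL)
 *		return ENXIO;
 *	...
 *	pci_iounmap(pdev, regs);
 */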

static inline void
pci_save_state(struct pci_dev *pdev)
{

	KASSERT(pdev->pd_saved_state == NULL);
	pdev->pd_saved_state = kmem_alloc(sizeof(*pdev->pd_saved_state),
	    KM_SLEEP);
	pci_conf_capture(pdev->pd_pa.pa_pc, pdev->pd_pa.pa_tag,
	    pdev->pd_saved_state);
}

static inline void
pci_restore_state(struct pci_dev *pdev)
{

	KASSERT(pdev->pd_saved_state != NULL);
	pci_conf_restore(pdev->pd_pa.pa_pc, pdev->pd_pa.pa_tag,
	    pdev->pd_saved_state);
	kmem_free(pdev->pd_saved_state, sizeof(*pdev->pd_saved_state));
	pdev->pd_saved_state = NULL;
}
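
/*
 * These are intended to bracket suspend/resume: pci_save_state at
 * suspend time, pci_restore_state at resume time.  Note that
 * pci_save_state allocates the saved-state buffer with KM_SLEEP, so it
 * must not be called from interrupt context.
 */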

static inline bool
pci_is_pcie(struct pci_dev *pdev)
{

	return (pci_find_capability(pdev, PCI_CAP_PCIEXPRESS) != 0);
}

static inline bool
pci_dma_supported(struct pci_dev *pdev, uintmax_t mask)
{

	/* XXX Cop-out.  */
	if (mask > DMA_BIT_MASK(32))
		return pci_dma64_available(&pdev->pd_pa);
	else
		return true;
}

#endif  /* _LINUX_PCI_H_ */