xref: /netbsd-src/sys/external/bsd/drm2/linux/linux_pci.c (revision d09c86808c95da28ab300cf7cb98b9bf51c9d7a8)
1 /*	$NetBSD: linux_pci.c,v 1.11 2021/12/19 01:21:08 riastradh Exp $	*/
2 
3 /*-
4  * Copyright (c) 2013 The NetBSD Foundation, Inc.
5  * All rights reserved.
6  *
7  * This code is derived from software contributed to The NetBSD Foundation
8  * by Taylor R. Campbell.
9  *
10  * Redistribution and use in source and binary forms, with or without
11  * modification, are permitted provided that the following conditions
12  * are met:
13  * 1. Redistributions of source code must retain the above copyright
14  *    notice, this list of conditions and the following disclaimer.
15  * 2. Redistributions in binary form must reproduce the above copyright
16  *    notice, this list of conditions and the following disclaimer in the
17  *    documentation and/or other materials provided with the distribution.
18  *
19  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
20  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
21  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
22  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
23  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
24  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
25  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
26  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
27  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
28  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
29  * POSSIBILITY OF SUCH DAMAGE.
30  */
31 
32 #ifdef _KERNEL_OPT
33 #include "opt_pci.h"
34 #endif
35 
36 #include <sys/cdefs.h>
37 __KERNEL_RCSID(0, "$NetBSD: linux_pci.c,v 1.11 2021/12/19 01:21:08 riastradh Exp $");
38 
39 #include <linux/pci.h>
40 
41 #include <drm/drm_agp_netbsd.h>
42 
43 device_t
44 pci_dev_dev(struct pci_dev *pdev)
45 {
46 
47 	return pdev->pd_dev;
48 }
49 
50 /* XXX Nouveau kludge!  */
51 struct drm_device *
52 pci_get_drvdata(struct pci_dev *pdev)
53 {
54 
55 	return pdev->pd_drm_dev;
56 }
57 
/*
 * Initialize a Linux-style pci_dev from NetBSD PCI attach arguments.
 *
 * pdev    = the pci_dev to fill in (fully overwritten)
 * dev     = autoconf device for this PCI function (may be NULL for
 *           the pci_get_* kludges)
 * parent  = autoconf device of the parent bus, recorded in pdev->bus
 * pa      = NetBSD attach args; copied into pdev->pd_pa
 * kludges = NBPCI_KLUDGE_* flags describing how pdev was obtained
 */
void
linux_pci_dev_init(struct pci_dev *pdev, device_t dev, device_t parent,
    const struct pci_attach_args *pa, int kludges)
{
	/* Subsystem vendor/device live together in one config dword.  */
	const uint32_t subsystem_id = pci_conf_read(pa->pa_pc, pa->pa_tag,
	    PCI_SUBSYS_ID_REG);
	unsigned i;

	memset(pdev, 0, sizeof(*pdev)); /* paranoia */

	pdev->pd_pa = *pa;
	pdev->pd_kludges = kludges;
	pdev->pd_rom_vaddr = NULL;
	pdev->pd_dev = dev;
#if (NACPICA > 0)
	/* Locate the matching ACPI node, if ACPI knows about us.  */
#ifdef __HAVE_PCI_GET_SEGMENT
	const int seg = pci_get_segment(pa->pa_pc);
#else
	const int seg = 0;
#endif
	pdev->pd_ad = acpi_pcidev_find(seg, pa->pa_bus,
	    pa->pa_device, pa->pa_function);
#else
	pdev->pd_ad = NULL;
#endif
	pdev->pd_saved_state = NULL;
	pdev->pd_intr_handles = NULL;
	/*
	 * NOTE(review): KM_NOSLEEP may return NULL, which the next line
	 * would dereference — confirm callers can tolerate KM_SLEEP or
	 * add a failure path.
	 */
	pdev->bus = kmem_zalloc(sizeof(*pdev->bus), KM_NOSLEEP);
	pdev->bus->pb_pc = pa->pa_pc;
	pdev->bus->pb_dev = parent;
	pdev->bus->number = pa->pa_bus;
	pdev->devfn = PCI_DEVFN(pa->pa_device, pa->pa_function);
	pdev->vendor = PCI_VENDOR(pa->pa_id);
	pdev->device = PCI_PRODUCT(pa->pa_id);
	pdev->subsystem_vendor = PCI_SUBSYS_VENDOR(subsystem_id);
	pdev->subsystem_device = PCI_SUBSYS_ID(subsystem_id);
	pdev->revision = PCI_REVISION(pa->pa_class);
	pdev->class = __SHIFTOUT(pa->pa_class, 0xffffff00UL); /* ? */

	/* Snapshot the type/address/size/flags of every BAR.  */
	CTASSERT(__arraycount(pdev->pd_resources) == PCI_NUM_RESOURCES);
	for (i = 0; i < PCI_NUM_RESOURCES; i++) {
		const int reg = PCI_BAR(i);

		pdev->pd_resources[i].type = pci_mapreg_type(pa->pa_pc,
		    pa->pa_tag, reg);
		if (pci_mapreg_info(pa->pa_pc, pa->pa_tag, reg,
			pdev->pd_resources[i].type,
			&pdev->pd_resources[i].addr,
			&pdev->pd_resources[i].size,
			&pdev->pd_resources[i].flags)) {
			/* BAR absent or unreadable: record it as empty.  */
			pdev->pd_resources[i].addr = 0;
			pdev->pd_resources[i].size = 0;
			pdev->pd_resources[i].flags = 0;
		}
		pdev->pd_resources[i].kva = NULL;
		pdev->pd_resources[i].mapped = false;
	}
}
116 
117 int
118 pci_find_capability(struct pci_dev *pdev, int cap)
119 {
120 
121 	return pci_get_capability(pdev->pd_pa.pa_pc, pdev->pd_pa.pa_tag, cap,
122 	    NULL, NULL);
123 }
124 
125 int
126 pci_read_config_dword(struct pci_dev *pdev, int reg, uint32_t *valuep)
127 {
128 
129 	KASSERT(!ISSET(reg, 3));
130 	*valuep = pci_conf_read(pdev->pd_pa.pa_pc, pdev->pd_pa.pa_tag, reg);
131 	return 0;
132 }
133 
134 int
135 pci_read_config_word(struct pci_dev *pdev, int reg, uint16_t *valuep)
136 {
137 
138 	KASSERT(!ISSET(reg, 1));
139 	*valuep = pci_conf_read(pdev->pd_pa.pa_pc, pdev->pd_pa.pa_tag,
140 	    (reg &~ 2)) >> (8 * (reg & 2));
141 	return 0;
142 }
143 
144 int
145 pci_read_config_byte(struct pci_dev *pdev, int reg, uint8_t *valuep)
146 {
147 
148 	*valuep = pci_conf_read(pdev->pd_pa.pa_pc, pdev->pd_pa.pa_tag,
149 	    (reg &~ 3)) >> (8 * (reg & 3));
150 	return 0;
151 }
152 
153 int
154 pci_write_config_dword(struct pci_dev *pdev, int reg, uint32_t value)
155 {
156 
157 	KASSERT(!ISSET(reg, 3));
158 	pci_conf_write(pdev->pd_pa.pa_pc, pdev->pd_pa.pa_tag, reg, value);
159 	return 0;
160 }
161 
162 int
163 pci_bus_read_config_dword(struct pci_bus *bus, unsigned devfn, int reg,
164     uint32_t *valuep)
165 {
166 	pcitag_t tag = pci_make_tag(bus->pb_pc, bus->number, PCI_SLOT(devfn),
167 	    PCI_FUNC(devfn));
168 
169 	KASSERT(!ISSET(reg, 1));
170 	*valuep = pci_conf_read(bus->pb_pc, tag, reg & ~3) >> (8 * (reg & 3));
171 	return 0;
172 }
173 
174 int
175 pci_bus_read_config_word(struct pci_bus *bus, unsigned devfn, int reg,
176     uint16_t *valuep)
177 {
178 	pcitag_t tag = pci_make_tag(bus->pb_pc, bus->number, PCI_SLOT(devfn),
179 	    PCI_FUNC(devfn));
180 
181 	KASSERT(!ISSET(reg, 1));
182 	*valuep = pci_conf_read(bus->pb_pc, tag, reg &~ 2) >> (8 * (reg & 2));
183 	return 0;
184 }
185 
186 int
187 pci_bus_read_config_byte(struct pci_bus *bus, unsigned devfn, int reg,
188     uint8_t *valuep)
189 {
190 	pcitag_t tag = pci_make_tag(bus->pb_pc, bus->number, PCI_SLOT(devfn),
191 	    PCI_FUNC(devfn));
192 
193 	*valuep = pci_conf_read(bus->pb_pc, tag, reg &~ 3) >> (8 * (reg & 3));
194 	return 0;
195 }
196 
197 int
198 pci_bus_write_config_dword(struct pci_bus *bus, unsigned devfn, int reg,
199     uint32_t value)
200 {
201 	pcitag_t tag = pci_make_tag(bus->pb_pc, bus->number, PCI_SLOT(devfn),
202 	    PCI_FUNC(devfn));
203 
204 	KASSERT(!ISSET(reg, 3));
205 	pci_conf_write(bus->pb_pc, tag, reg, value);
206 	return 0;
207 }
208 
209 static void
210 pci_rmw_config(pci_chipset_tag_t pc, pcitag_t tag, int reg, unsigned int bytes,
211     uint32_t value)
212 {
213 	const uint32_t mask = ~((~0UL) << (8 * bytes));
214 	const int reg32 = (reg &~ 3);
215 	const unsigned int shift = (8 * (reg & 3));
216 	uint32_t value32;
217 
218 	KASSERT(bytes <= 4);
219 	KASSERT(!ISSET(value, ~mask));
220 	value32 = pci_conf_read(pc, tag, reg32);
221 	value32 &=~ (mask << shift);
222 	value32 |= (value << shift);
223 	pci_conf_write(pc, tag, reg32, value32);
224 }
225 
226 int
227 pci_write_config_word(struct pci_dev *pdev, int reg, uint16_t value)
228 {
229 
230 	KASSERT(!ISSET(reg, 1));
231 	pci_rmw_config(pdev->pd_pa.pa_pc, pdev->pd_pa.pa_tag, reg, 2, value);
232 	return 0;
233 }
234 
235 int
236 pci_write_config_byte(struct pci_dev *pdev, int reg, uint8_t value)
237 {
238 
239 	pci_rmw_config(pdev->pd_pa.pa_pc, pdev->pd_pa.pa_tag, reg, 1, value);
240 	return 0;
241 }
242 
243 int
244 pci_bus_write_config_word(struct pci_bus *bus, unsigned devfn, int reg,
245     uint16_t value)
246 {
247 	pcitag_t tag = pci_make_tag(bus->pb_pc, bus->number, PCI_SLOT(devfn),
248 	    PCI_FUNC(devfn));
249 
250 	KASSERT(!ISSET(reg, 1));
251 	pci_rmw_config(bus->pb_pc, tag, reg, 2, value);
252 	return 0;
253 }
254 
255 int
256 pci_bus_write_config_byte(struct pci_bus *bus, unsigned devfn, int reg,
257     uint8_t value)
258 {
259 	pcitag_t tag = pci_make_tag(bus->pb_pc, bus->number, PCI_SLOT(devfn),
260 	    PCI_FUNC(devfn));
261 
262 	pci_rmw_config(bus->pb_pc, tag, reg, 1, value);
263 	return 0;
264 }
265 
266 int
267 pci_enable_msi(struct pci_dev *pdev)
268 {
269 	const struct pci_attach_args *const pa = &pdev->pd_pa;
270 
271 	if (pci_msi_alloc_exact(pa, &pdev->pd_intr_handles, 1))
272 		return -EINVAL;
273 
274 	pdev->msi_enabled = 1;
275 	return 0;
276 }
277 
278 void
279 pci_disable_msi(struct pci_dev *pdev __unused)
280 {
281 	const struct pci_attach_args *const pa = &pdev->pd_pa;
282 
283 	if (pdev->pd_intr_handles != NULL) {
284 		pci_intr_release(pa->pa_pc, pdev->pd_intr_handles, 1);
285 		pdev->pd_intr_handles = NULL;
286 	}
287 	pdev->msi_enabled = 0;
288 }
289 
290 void
291 pci_set_master(struct pci_dev *pdev)
292 {
293 	pcireg_t csr;
294 
295 	csr = pci_conf_read(pdev->pd_pa.pa_pc, pdev->pd_pa.pa_tag,
296 	    PCI_COMMAND_STATUS_REG);
297 	csr |= PCI_COMMAND_MASTER_ENABLE;
298 	pci_conf_write(pdev->pd_pa.pa_pc, pdev->pd_pa.pa_tag,
299 	    PCI_COMMAND_STATUS_REG, csr);
300 }
301 
302 void
303 pci_clear_master(struct pci_dev *pdev)
304 {
305 	pcireg_t csr;
306 
307 	csr = pci_conf_read(pdev->pd_pa.pa_pc, pdev->pd_pa.pa_tag,
308 	    PCI_COMMAND_STATUS_REG);
309 	csr &= ~(pcireg_t)PCI_COMMAND_MASTER_ENABLE;
310 	pci_conf_write(pdev->pd_pa.pa_pc, pdev->pd_pa.pa_tag,
311 	    PCI_COMMAND_STATUS_REG, csr);
312 }
313 
314 bus_addr_t
315 pcibios_align_resource(void *p, const struct resource *resource,
316     bus_addr_t addr, bus_size_t size)
317 {
318 	panic("pcibios_align_resource has accessed unaligned neurons!");
319 }
320 
/*
 * Allocate a region of PCI memory or I/O space for `resource'.
 *
 * On success, fills in resource->r_bst/r_bsh/start/size and returns
 * 0; on failure returns the bus_space_alloc error.  The align_fn and
 * type arguments from the Linux API are ignored here.
 *
 * NOTE(review): the switch matches resource->flags EXACTLY against
 * IORESOURCE_MEM / IORESOURCE_IO (no masking), so any extra flag bit
 * panics — confirm callers never combine flags.
 */
int
pci_bus_alloc_resource(struct pci_bus *bus, struct resource *resource,
    bus_size_t size, bus_size_t align, bus_addr_t start, int type __unused,
    bus_addr_t (*align_fn)(void *, const struct resource *, bus_addr_t,
	bus_size_t) __unused,
    struct pci_dev *pdev)
{
	const struct pci_attach_args *const pa = &pdev->pd_pa;
	bus_space_tag_t bst;
	int error;

	/* Pick the bus space tag matching the requested resource kind.  */
	switch (resource->flags) {
	case IORESOURCE_MEM:
		bst = pa->pa_memt;
		break;

	case IORESOURCE_IO:
		bst = pa->pa_iot;
		break;

	default:
		panic("I don't know what kind of resource you want!");
	}

	resource->r_bst = bst;
	/* Search [start, max addr] for a free, suitably aligned region.  */
	error = bus_space_alloc(bst, start, __type_max(bus_addr_t),
	    size, align, 0, 0, &resource->start, &resource->r_bsh);
	if (error)
		return error;

	resource->size = size;
	return 0;
}
354 
355 /*
356  * XXX Mega-kludgerific!  pci_get_bus_and_slot and pci_get_class are
357  * defined only for their single purposes in i915drm, in
358  * i915_get_bridge_dev and intel_detect_pch.  We can't define them more
359  * generally without adapting pci_find_device (and pci_enumerate_bus
360  * internally) to pass a cookie through.
361  */
362 
363 static int
364 pci_kludgey_match_bus0_dev0_func0(const struct pci_attach_args *pa)
365 {
366 
367 	/* XXX domain */
368 	if (pa->pa_bus != 0)
369 		return 0;
370 	if (pa->pa_device != 0)
371 		return 0;
372 	if (pa->pa_function != 0)
373 		return 0;
374 
375 	return 1;
376 }
377 
378 struct pci_dev *
379 pci_get_domain_bus_and_slot(int domain, int bus, int slot)
380 {
381 	struct pci_attach_args pa;
382 
383 	KASSERT(domain == 0);
384 	KASSERT(bus == 0);
385 	KASSERT(slot == PCI_DEVFN(0, 0));
386 
387 	if (!pci_find_device(&pa, &pci_kludgey_match_bus0_dev0_func0))
388 		return NULL;
389 
390 	struct pci_dev *const pdev = kmem_zalloc(sizeof(*pdev), KM_SLEEP);
391 	linux_pci_dev_init(pdev, NULL, NULL, &pa, NBPCI_KLUDGE_GET_MUMBLE);
392 
393 	return pdev;
394 }
395 
396 static int
397 pci_kludgey_match_isa_bridge(const struct pci_attach_args *pa)
398 {
399 
400 	if (PCI_CLASS(pa->pa_class) != PCI_CLASS_BRIDGE)
401 		return 0;
402 	if (PCI_SUBCLASS(pa->pa_class) != PCI_SUBCLASS_BRIDGE_ISA)
403 		return 0;
404 
405 	return 1;
406 }
407 
408 void
409 pci_dev_put(struct pci_dev *pdev)
410 {
411 
412 	if (pdev == NULL)
413 		return;
414 
415 	KASSERT(ISSET(pdev->pd_kludges, NBPCI_KLUDGE_GET_MUMBLE));
416 	kmem_free(pdev->bus, sizeof(*pdev->bus));
417 	kmem_free(pdev, sizeof(*pdev));
418 }
419 
420 struct pci_dev *		/* XXX i915 kludge */
421 pci_get_class(uint32_t class_subclass_shifted __unused, struct pci_dev *from)
422 {
423 	struct pci_attach_args pa;
424 
425 	KASSERT(class_subclass_shifted == (PCI_CLASS_BRIDGE_ISA << 8));
426 
427 	if (from != NULL) {
428 		pci_dev_put(from);
429 		return NULL;
430 	}
431 
432 	if (!pci_find_device(&pa, &pci_kludgey_match_isa_bridge))
433 		return NULL;
434 
435 	struct pci_dev *const pdev = kmem_zalloc(sizeof(*pdev), KM_SLEEP);
436 	linux_pci_dev_init(pdev, NULL, NULL, &pa, NBPCI_KLUDGE_GET_MUMBLE);
437 
438 	return pdev;
439 }
440 
441 void
442 pci_unmap_rom(struct pci_dev *pdev, void __pci_rom_iomem *vaddr __unused)
443 {
444 
445 	/* XXX Disable the ROM address decoder.  */
446 	KASSERT(ISSET(pdev->pd_kludges, NBPCI_KLUDGE_MAP_ROM));
447 	KASSERT(vaddr == pdev->pd_rom_vaddr);
448 	bus_space_unmap(pdev->pd_rom_bst, pdev->pd_rom_bsh, pdev->pd_rom_size);
449 	pdev->pd_kludges &= ~NBPCI_KLUDGE_MAP_ROM;
450 	pdev->pd_rom_vaddr = NULL;
451 }
452 
/* XXX Whattakludge!  Should move this in sys/arch/.  */
/*
 * Machine-dependent ROM fallback: on x86-family machines, map the
 * legacy VGA BIOS window at 0xc0000 for VGA display devices.
 * Returns 0 and records the mapping in pdev->pd_rom_* on success,
 * ENXIO otherwise (including on all other architectures).
 */
static int
pci_map_rom_md(struct pci_dev *pdev)
{
#if defined(__i386__) || defined(__x86_64__) || defined(__ia64__)
	/* Legacy VGA BIOS shadow region: 128 KB at 0xc0000.  */
	const bus_addr_t rom_base = 0xc0000;
	const bus_size_t rom_size = 0x20000;
	bus_space_handle_t rom_bsh;
	int error;

	/* The legacy window is only meaningful for VGA display cards.  */
	if (PCI_CLASS(pdev->pd_pa.pa_class) != PCI_CLASS_DISPLAY)
		return ENXIO;
	if (PCI_SUBCLASS(pdev->pd_pa.pa_class) != PCI_SUBCLASS_DISPLAY_VGA)
		return ENXIO;
	/* XXX Check whether this is the primary VGA card?  */
	error = bus_space_map(pdev->pd_pa.pa_memt, rom_base, rom_size,
	    (BUS_SPACE_MAP_LINEAR | BUS_SPACE_MAP_PREFETCHABLE), &rom_bsh);
	if (error)
		return ENXIO;

	pdev->pd_rom_bst = pdev->pd_pa.pa_memt;
	pdev->pd_rom_bsh = rom_bsh;
	pdev->pd_rom_size = rom_size;
	pdev->pd_kludges |= NBPCI_KLUDGE_MAP_ROM;

	return 0;
#else
	return ENXIO;
#endif
}
483 
/*
 * Map the device's expansion ROM, store the size of the ROM image in
 * *sizep, and return its kernel virtual address; NULL on failure.
 *
 * Strategy: first map the ROM BAR (machine-independent path); if
 * that fails, or no x86 code image is found inside it, fall back to
 * the machine-dependent legacy VGA window via pci_map_rom_md and
 * search that instead.
 */
void __pci_rom_iomem *
pci_map_rom(struct pci_dev *pdev, size_t *sizep)
{

	KASSERT(!ISSET(pdev->pd_kludges, NBPCI_KLUDGE_MAP_ROM));

	/* Machine-independent attempt: map the ROM BAR itself.  */
	if (pci_mapreg_map(&pdev->pd_pa, PCI_MAPREG_ROM, PCI_MAPREG_TYPE_ROM,
		(BUS_SPACE_MAP_PREFETCHABLE | BUS_SPACE_MAP_LINEAR),
		&pdev->pd_rom_bst, &pdev->pd_rom_bsh, NULL, &pdev->pd_rom_size)
	    != 0)
		goto fail_mi;
	pdev->pd_kludges |= NBPCI_KLUDGE_MAP_ROM;

	/* XXX This type is obviously wrong in general...  */
	if (pci_find_rom(&pdev->pd_pa, pdev->pd_rom_bst, pdev->pd_rom_bsh,
		pdev->pd_rom_size, PCI_ROM_CODE_TYPE_X86,
		&pdev->pd_rom_found_bsh, &pdev->pd_rom_found_size)) {
		/* No x86 image in the BAR ROM; undo and try the MD path.  */
		pci_unmap_rom(pdev, NULL);
		goto fail_mi;
	}
	goto success;

fail_mi:
	/* Machine-dependent fallback (legacy VGA BIOS window).  */
	if (pci_map_rom_md(pdev) != 0)
		goto fail_md;

	/* XXX This type is obviously wrong in general...  */
	if (pci_find_rom(&pdev->pd_pa, pdev->pd_rom_bst, pdev->pd_rom_bsh,
		pdev->pd_rom_size, PCI_ROM_CODE_TYPE_X86,
		&pdev->pd_rom_found_bsh, &pdev->pd_rom_found_size)) {
		pci_unmap_rom(pdev, NULL);
		goto fail_md;
	}

success:
	KASSERT(pdev->pd_rom_found_size <= SIZE_T_MAX);
	*sizep = pdev->pd_rom_found_size;
	pdev->pd_rom_vaddr = bus_space_vaddr(pdev->pd_rom_bst,
	    pdev->pd_rom_found_bsh);
	return pdev->pd_rom_vaddr;

fail_md:
	return NULL;
}
528 
529 void __pci_rom_iomem *
530 pci_platform_rom(struct pci_dev *pdev __unused, size_t *sizep)
531 {
532 
533 	*sizep = 0;
534 	return NULL;
535 }
536 
537 int
538 pci_enable_rom(struct pci_dev *pdev)
539 {
540 	const pci_chipset_tag_t pc = pdev->pd_pa.pa_pc;
541 	const pcitag_t tag = pdev->pd_pa.pa_tag;
542 	pcireg_t addr;
543 	int s;
544 
545 	/* XXX Don't do anything if the ROM isn't there.  */
546 
547 	s = splhigh();
548 	addr = pci_conf_read(pc, tag, PCI_MAPREG_ROM);
549 	addr |= PCI_MAPREG_ROM_ENABLE;
550 	pci_conf_write(pc, tag, PCI_MAPREG_ROM, addr);
551 	splx(s);
552 
553 	return 0;
554 }
555 
556 void
557 pci_disable_rom(struct pci_dev *pdev)
558 {
559 	const pci_chipset_tag_t pc = pdev->pd_pa.pa_pc;
560 	const pcitag_t tag = pdev->pd_pa.pa_tag;
561 	pcireg_t addr;
562 	int s;
563 
564 	s = splhigh();
565 	addr = pci_conf_read(pc, tag, PCI_MAPREG_ROM);
566 	addr &= ~(pcireg_t)PCI_MAPREG_ROM_ENABLE;
567 	pci_conf_write(pc, tag, PCI_MAPREG_ROM, addr);
568 	splx(s);
569 }
570 
571 bus_addr_t
572 pci_resource_start(struct pci_dev *pdev, unsigned i)
573 {
574 
575 	KASSERT(i < PCI_NUM_RESOURCES);
576 	return pdev->pd_resources[i].addr;
577 }
578 
579 bus_size_t
580 pci_resource_len(struct pci_dev *pdev, unsigned i)
581 {
582 
583 	KASSERT(i < PCI_NUM_RESOURCES);
584 	return pdev->pd_resources[i].size;
585 }
586 
587 bus_addr_t
588 pci_resource_end(struct pci_dev *pdev, unsigned i)
589 {
590 
591 	return pci_resource_start(pdev, i) + (pci_resource_len(pdev, i) - 1);
592 }
593 
594 int
595 pci_resource_flags(struct pci_dev *pdev, unsigned i)
596 {
597 
598 	KASSERT(i < PCI_NUM_RESOURCES);
599 	return pdev->pd_resources[i].flags;
600 }
601 
/*
 * Map the first `size' bytes of memory BAR i linearly and return the
 * kernel virtual address; NULL if the BAR is not a memory BAR, is
 * smaller than `size', or the mapping fails.  Only memory BARs are
 * supported (I/O BARs return NULL).
 *
 * NOTE(review): only `size' bytes are mapped here, but pci_iounmap
 * and linux_pci_dev_destroy unmap pd_resources[i].size (the full BAR
 * size) — confirm callers always pass the full BAR size.
 */
void __pci_iomem *
pci_iomap(struct pci_dev *pdev, unsigned i, bus_size_t size)
{
	int error;

	KASSERT(i < PCI_NUM_RESOURCES);
	KASSERT(pdev->pd_resources[i].kva == NULL);

	if (PCI_MAPREG_TYPE(pdev->pd_resources[i].type) != PCI_MAPREG_TYPE_MEM)
		return NULL;
	if (pdev->pd_resources[i].size < size)
		return NULL;
	/* Linear mapping so we can hand back a plain virtual address.  */
	error = bus_space_map(pdev->pd_pa.pa_memt, pdev->pd_resources[i].addr,
	    size, BUS_SPACE_MAP_LINEAR | pdev->pd_resources[i].flags,
	    &pdev->pd_resources[i].bsh);
	if (error)
		return NULL;
	/* XXX Synchronize with drm_agp_borrow_hook in drm_agpsupport.c.  */
	pdev->pd_resources[i].bst = pdev->pd_pa.pa_memt;
	pdev->pd_resources[i].kva = bus_space_vaddr(pdev->pd_resources[i].bst,
	    pdev->pd_resources[i].bsh);
	pdev->pd_resources[i].mapped = true;

	return pdev->pd_resources[i].kva;
}
627 
/*
 * Unmap a BAR previously mapped with pci_iomap, identified by the
 * kernel virtual address it returned.  Panics (via KASSERT) if kva
 * does not match any mapped resource.
 *
 * NOTE(review): this unmaps pd_resources[i].size (the full BAR size),
 * but pci_iomap may have mapped fewer bytes — see the note there.
 */
void
pci_iounmap(struct pci_dev *pdev, void __pci_iomem *kva)
{
	unsigned i;

	/* Find which resource's mapping produced this address.  */
	CTASSERT(__arraycount(pdev->pd_resources) == PCI_NUM_RESOURCES);
	for (i = 0; i < PCI_NUM_RESOURCES; i++) {
		if (pdev->pd_resources[i].kva == kva)
			break;
	}
	KASSERT(i < PCI_NUM_RESOURCES);

	pdev->pd_resources[i].kva = NULL;
	bus_space_unmap(pdev->pd_resources[i].bst, pdev->pd_resources[i].bsh,
	    pdev->pd_resources[i].size);
}
644 
645 void
646 pci_save_state(struct pci_dev *pdev)
647 {
648 
649 	KASSERT(pdev->pd_saved_state == NULL);
650 	pdev->pd_saved_state = kmem_alloc(sizeof(*pdev->pd_saved_state),
651 	    KM_SLEEP);
652 	pci_conf_capture(pdev->pd_pa.pa_pc, pdev->pd_pa.pa_tag,
653 	    pdev->pd_saved_state);
654 }
655 
656 void
657 pci_restore_state(struct pci_dev *pdev)
658 {
659 
660 	KASSERT(pdev->pd_saved_state != NULL);
661 	pci_conf_restore(pdev->pd_pa.pa_pc, pdev->pd_pa.pa_tag,
662 	    pdev->pd_saved_state);
663 	kmem_free(pdev->pd_saved_state, sizeof(*pdev->pd_saved_state));
664 	pdev->pd_saved_state = NULL;
665 }
666 
667 bool
668 pci_is_pcie(struct pci_dev *pdev)
669 {
670 
671 	return (pci_find_capability(pdev, PCI_CAP_PCIEXPRESS) != 0);
672 }
673 
674 bool
675 pci_dma_supported(struct pci_dev *pdev, uintmax_t mask)
676 {
677 
678 	/* XXX Cop-out.  */
679 	if (mask > DMA_BIT_MASK(32))
680 		return pci_dma64_available(&pdev->pd_pa);
681 	else
682 		return true;
683 }
684 
685 bool
686 pci_is_root_bus(struct pci_bus *bus)
687 {
688 
689 	/* XXX Cop-out. */
690 	return false;
691 }
692 
693 int
694 pci_domain_nr(struct pci_bus *bus)
695 {
696 
697 	return device_unit(bus->pb_dev);
698 }
699 
700 /*
701  * We explicitly rename pci_enable/disable_device so that you have to
702  * review each use of them, since NetBSD's PCI API does _not_ respect
703  * our local enablecnt here, but there are different parts of NetBSD
704  * that automatically enable/disable like PMF, so you have to decide
705  * for each one whether to call it or not.
706  */
707 
/*
 * Enable I/O and memory decoding for the device, keeping a local
 * enable count so enable/disable pairs balance.  Deliberately not
 * named pci_enable_device — see the comment above.  Always returns 0.
 */
int
linux_pci_enable_device(struct pci_dev *pdev)
{
	const struct pci_attach_args *pa = &pdev->pd_pa;
	pcireg_t csr;
	int s;

	/* Already enabled by us: just bump the count.  */
	if (pdev->pd_enablecnt++)
		return 0;

	s = splhigh();
	csr = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_COMMAND_STATUS_REG);
	/* If someone else (firmware) already enabled it, credit them.  */
	/* (The extra count keeps our final disable from undoing theirs.) */
	if (csr & (PCI_COMMAND_IO_ENABLE|PCI_COMMAND_MEM_ENABLE))
		pdev->pd_enablecnt++;
	csr |= PCI_COMMAND_IO_ENABLE;
	csr |= PCI_COMMAND_MEM_ENABLE;
	pci_conf_write(pa->pa_pc, pa->pa_tag, PCI_COMMAND_STATUS_REG, csr);
	splx(s);

	return 0;
}
730 
731 void
732 linux_pci_disable_device(struct pci_dev *pdev)
733 {
734 	const struct pci_attach_args *pa = &pdev->pd_pa;
735 	pcireg_t csr;
736 	int s;
737 
738 	if (--pdev->pd_enablecnt)
739 		return;
740 
741 	s = splhigh();
742 	csr = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_COMMAND_STATUS_REG);
743 	csr &= ~PCI_COMMAND_IO_ENABLE;
744 	csr &= ~PCI_COMMAND_MEM_ENABLE;
745 	pci_conf_write(pa->pa_pc, pa->pa_tag, PCI_COMMAND_STATUS_REG, csr);
746 	splx(s);
747 }
748 
/*
 * Tear down a pci_dev initialized by linux_pci_dev_init: free the
 * pci_bus shim, unmap any mapped ROM, and unmap every BAR mapped via
 * pci_iomap.  Saved state and MSI handles must already be gone.
 */
void
linux_pci_dev_destroy(struct pci_dev *pdev)
{
	unsigned i;

	if (pdev->bus != NULL) {
		kmem_free(pdev->bus, sizeof(*pdev->bus));
		pdev->bus = NULL;
	}
	if (ISSET(pdev->pd_kludges, NBPCI_KLUDGE_MAP_ROM)) {
		pci_unmap_rom(pdev, pdev->pd_rom_vaddr);
		pdev->pd_rom_vaddr = 0;
	}
	/* Unmap any BARs still mapped (pci_iomap without pci_iounmap).  */
	for (i = 0; i < __arraycount(pdev->pd_resources); i++) {
		if (!pdev->pd_resources[i].mapped)
			continue;
		bus_space_unmap(pdev->pd_resources[i].bst,
		    pdev->pd_resources[i].bsh, pdev->pd_resources[i].size);
	}

	/* There is no way these should be still in use.  */
	KASSERT(pdev->pd_saved_state == NULL);
	KASSERT(pdev->pd_intr_handles == NULL);
}
773