/*	$NetBSD: linux_pci.c,v 1.20 2021/12/19 12:00:16 riastradh Exp $	*/

/*-
 * Copyright (c) 2013 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Taylor R. Campbell.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#ifdef _KERNEL_OPT
#include "acpica.h"
#include "opt_pci.h"
#endif

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: linux_pci.c,v 1.20 2021/12/19 12:00:16 riastradh Exp $");

#if NACPICA > 0
#include <dev/acpi/acpivar.h>
#include <dev/acpi/acpi_pci.h>
#endif

#include <linux/pci.h>

#include <drm/drm_agp_netbsd.h>

device_t
pci_dev_dev(struct pci_dev *pdev)
{

	return pdev->pd_dev;
}

void
pci_set_drvdata(struct pci_dev *pdev, void *drvdata)
{
	pdev->pd_drvdata = drvdata;
}

void *
pci_get_drvdata(struct pci_dev *pdev)
{
	return pdev->pd_drvdata;
}

const char *
pci_name(struct pci_dev *pdev)
{

	/* XXX not sure this has the right format */
	return device_xname(pci_dev_dev(pdev));
}

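/*
 * Initialize a Linux-style struct pci_dev from NetBSD's pci_attach_args:
 * record the attachment, the vendor/device/class identification
 * registers, the ACPI device node if ACPICA is configured, a minimal
 * pci_bus, and the address/size/flags of each BAR.
 */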
void
linux_pci_dev_init(struct pci_dev *pdev, device_t dev, device_t parent,
    const struct pci_attach_args *pa, int kludges)
{
	const uint32_t subsystem_id = pci_conf_read(pa->pa_pc, pa->pa_tag,
	    PCI_SUBSYS_ID_REG);
	unsigned i;

	memset(pdev, 0, sizeof(*pdev)); /* paranoia */

	pdev->pd_pa = *pa;
	pdev->pd_kludges = kludges;
	pdev->pd_rom_vaddr = NULL;
	pdev->pd_dev = dev;
#if (NACPICA > 0)
#ifdef __HAVE_PCI_GET_SEGMENT
	const int seg = pci_get_segment(pa->pa_pc);
#else
	const int seg = 0;
#endif
	pdev->pd_ad = acpi_pcidev_find(seg, pa->pa_bus,
	    pa->pa_device, pa->pa_function);
#else
	pdev->pd_ad = NULL;
#endif
	pdev->pd_saved_state = NULL;
	pdev->pd_intr_handles = NULL;
	pdev->pd_drvdata = NULL;
	pdev->bus = kmem_zalloc(sizeof(*pdev->bus), KM_NOSLEEP);
	pdev->bus->pb_pc = pa->pa_pc;
	pdev->bus->pb_dev = parent;
	pdev->bus->number = pa->pa_bus;
	pdev->devfn = PCI_DEVFN(pa->pa_device, pa->pa_function);
	pdev->vendor = PCI_VENDOR(pa->pa_id);
	pdev->device = PCI_PRODUCT(pa->pa_id);
	pdev->subsystem_vendor = PCI_SUBSYS_VENDOR(subsystem_id);
	pdev->subsystem_device = PCI_SUBSYS_ID(subsystem_id);
	pdev->revision = PCI_REVISION(pa->pa_class);
	pdev->class = __SHIFTOUT(pa->pa_class, 0xffffff00UL); /* ? */

	CTASSERT(__arraycount(pdev->pd_resources) == PCI_NUM_RESOURCES);
	for (i = 0; i < PCI_NUM_RESOURCES; i++) {
		const int reg = PCI_BAR(i);

		pdev->pd_resources[i].type = pci_mapreg_type(pa->pa_pc,
		    pa->pa_tag, reg);
		if (pci_mapreg_info(pa->pa_pc, pa->pa_tag, reg,
			pdev->pd_resources[i].type,
			&pdev->pd_resources[i].addr,
			&pdev->pd_resources[i].size,
			&pdev->pd_resources[i].flags)) {
			pdev->pd_resources[i].addr = 0;
			pdev->pd_resources[i].size = 0;
			pdev->pd_resources[i].flags = 0;
		}
		pdev->pd_resources[i].kva = NULL;
		pdev->pd_resources[i].mapped = false;
	}
}

int
pci_find_capability(struct pci_dev *pdev, int cap)
{

	return pci_get_capability(pdev->pd_pa.pa_pc, pdev->pd_pa.pa_tag, cap,
	    NULL, NULL);
}

int
pci_read_config_dword(struct pci_dev *pdev, int reg, uint32_t *valuep)
{

	KASSERT(!ISSET(reg, 3));
	*valuep = pci_conf_read(pdev->pd_pa.pa_pc, pdev->pd_pa.pa_tag, reg);
	return 0;
}

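/*
 * NetBSD's pci_conf_read operates on whole 32-bit registers, so the
 * sub-dword accessors below read the register that contains the
 * requested offset and shift the wanted word or byte down into place.
 */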
int
pci_read_config_word(struct pci_dev *pdev, int reg, uint16_t *valuep)
{

	KASSERT(!ISSET(reg, 1));
	*valuep = pci_conf_read(pdev->pd_pa.pa_pc, pdev->pd_pa.pa_tag,
	    (reg &~ 2)) >> (8 * (reg & 2));
	return 0;
}

int
pci_read_config_byte(struct pci_dev *pdev, int reg, uint8_t *valuep)
{

	*valuep = pci_conf_read(pdev->pd_pa.pa_pc, pdev->pd_pa.pa_tag,
	    (reg &~ 3)) >> (8 * (reg & 3));
	return 0;
}

int
pci_write_config_dword(struct pci_dev *pdev, int reg, uint32_t value)
{

	KASSERT(!ISSET(reg, 3));
	pci_conf_write(pdev->pd_pa.pa_pc, pdev->pd_pa.pa_tag, reg, value);
	return 0;
}

int
pci_bus_read_config_dword(struct pci_bus *bus, unsigned devfn, int reg,
    uint32_t *valuep)
{
	pcitag_t tag = pci_make_tag(bus->pb_pc, bus->number, PCI_SLOT(devfn),
	    PCI_FUNC(devfn));

	KASSERT(!ISSET(reg, 3));
	*valuep = pci_conf_read(bus->pb_pc, tag, reg & ~3) >> (8 * (reg & 3));
	return 0;
}

int
pci_bus_read_config_word(struct pci_bus *bus, unsigned devfn, int reg,
    uint16_t *valuep)
{
	pcitag_t tag = pci_make_tag(bus->pb_pc, bus->number, PCI_SLOT(devfn),
	    PCI_FUNC(devfn));

	KASSERT(!ISSET(reg, 1));
	*valuep = pci_conf_read(bus->pb_pc, tag, reg &~ 2) >> (8 * (reg & 2));
	return 0;
}

int
pci_bus_read_config_byte(struct pci_bus *bus, unsigned devfn, int reg,
    uint8_t *valuep)
{
	pcitag_t tag = pci_make_tag(bus->pb_pc, bus->number, PCI_SLOT(devfn),
	    PCI_FUNC(devfn));

	*valuep = pci_conf_read(bus->pb_pc, tag, reg &~ 3) >> (8 * (reg & 3));
	return 0;
}

int
pci_bus_write_config_dword(struct pci_bus *bus, unsigned devfn, int reg,
    uint32_t value)
{
	pcitag_t tag = pci_make_tag(bus->pb_pc, bus->number, PCI_SLOT(devfn),
	    PCI_FUNC(devfn));

	KASSERT(!ISSET(reg, 3));
	pci_conf_write(bus->pb_pc, tag, reg, value);
	return 0;
}

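/*
 * Write a byte or word of config space by read-modify-write of the
 * enclosing 32-bit register.  For example, a one-byte write at offset
 * 0x52 reads the dword at 0x50, replaces bits 23:16 (shift = 8 * (0x52
 * & 3) = 16), and writes the dword back.
 */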
static void
pci_rmw_config(pci_chipset_tag_t pc, pcitag_t tag, int reg, unsigned int bytes,
    uint32_t value)
{
	const uint32_t mask = ~((~0UL) << (8 * bytes));
	const int reg32 = (reg &~ 3);
	const unsigned int shift = (8 * (reg & 3));
	uint32_t value32;

	KASSERT(bytes <= 4);
	KASSERT(!ISSET(value, ~mask));
	value32 = pci_conf_read(pc, tag, reg32);
	value32 &=~ (mask << shift);
	value32 |= (value << shift);
	pci_conf_write(pc, tag, reg32, value32);
}

int
pci_write_config_word(struct pci_dev *pdev, int reg, uint16_t value)
{

	KASSERT(!ISSET(reg, 1));
	pci_rmw_config(pdev->pd_pa.pa_pc, pdev->pd_pa.pa_tag, reg, 2, value);
	return 0;
}

int
pci_write_config_byte(struct pci_dev *pdev, int reg, uint8_t value)
{

	pci_rmw_config(pdev->pd_pa.pa_pc, pdev->pd_pa.pa_tag, reg, 1, value);
	return 0;
}

int
pci_bus_write_config_word(struct pci_bus *bus, unsigned devfn, int reg,
    uint16_t value)
{
	pcitag_t tag = pci_make_tag(bus->pb_pc, bus->number, PCI_SLOT(devfn),
	    PCI_FUNC(devfn));

	KASSERT(!ISSET(reg, 1));
	pci_rmw_config(bus->pb_pc, tag, reg, 2, value);
	return 0;
}

int
pci_bus_write_config_byte(struct pci_bus *bus, unsigned devfn, int reg,
    uint8_t value)
{
	pcitag_t tag = pci_make_tag(bus->pb_pc, bus->number, PCI_SLOT(devfn),
	    PCI_FUNC(devfn));

	pci_rmw_config(bus->pb_pc, tag, reg, 1, value);
	return 0;
}

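/*
 * Allocate exactly one MSI vector through NetBSD's pci_msi_alloc_exact
 * and remember the handle in pd_intr_handles; pci_disable_msi releases
 * it again.
 */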
int
pci_enable_msi(struct pci_dev *pdev)
{
	const struct pci_attach_args *const pa = &pdev->pd_pa;

	if (pci_msi_alloc_exact(pa, &pdev->pd_intr_handles, 1))
		return -EINVAL;

	pdev->msi_enabled = 1;
	return 0;
}

void
pci_disable_msi(struct pci_dev *pdev)
{
	const struct pci_attach_args *const pa = &pdev->pd_pa;

	if (pdev->pd_intr_handles != NULL) {
		pci_intr_release(pa->pa_pc, pdev->pd_intr_handles, 1);
		pdev->pd_intr_handles = NULL;
	}
	pdev->msi_enabled = 0;
}

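/* Set or clear the bus-master enable bit in the PCI command register.  */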
void
pci_set_master(struct pci_dev *pdev)
{
	pcireg_t csr;

	csr = pci_conf_read(pdev->pd_pa.pa_pc, pdev->pd_pa.pa_tag,
	    PCI_COMMAND_STATUS_REG);
	csr |= PCI_COMMAND_MASTER_ENABLE;
	pci_conf_write(pdev->pd_pa.pa_pc, pdev->pd_pa.pa_tag,
	    PCI_COMMAND_STATUS_REG, csr);
}

void
pci_clear_master(struct pci_dev *pdev)
{
	pcireg_t csr;

	csr = pci_conf_read(pdev->pd_pa.pa_pc, pdev->pd_pa.pa_tag,
	    PCI_COMMAND_STATUS_REG);
	csr &= ~(pcireg_t)PCI_COMMAND_MASTER_ENABLE;
	pci_conf_write(pdev->pd_pa.pa_pc, pdev->pd_pa.pa_tag,
	    PCI_COMMAND_STATUS_REG, csr);
}

bus_addr_t
pcibios_align_resource(void *p, const struct resource *resource,
    bus_addr_t addr, bus_size_t size)
{
	panic("pcibios_align_resource has accessed unaligned neurons!");
}

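/*
 * Allocate bus space for a memory or I/O resource with bus_space_alloc,
 * recording the tag, handle, and allocated address range in *resource.
 * Only IORESOURCE_MEM and IORESOURCE_IO are supported; the alignment
 * callback is ignored.
 */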
int
pci_bus_alloc_resource(struct pci_bus *bus, struct resource *resource,
    bus_size_t size, bus_size_t align, bus_addr_t start, int type __unused,
    bus_addr_t (*align_fn)(void *, const struct resource *, bus_addr_t,
	bus_size_t) __unused,
    struct pci_dev *pdev)
{
	const struct pci_attach_args *const pa = &pdev->pd_pa;
	bus_space_tag_t bst;
	int error;

	switch (resource->flags) {
	case IORESOURCE_MEM:
		bst = pa->pa_memt;
		break;

	case IORESOURCE_IO:
		bst = pa->pa_iot;
		break;

	default:
		panic("I don't know what kind of resource you want!");
	}

	resource->r_bst = bst;
	error = bus_space_alloc(bst, start, __type_max(bus_addr_t),
	    size, align, 0, 0, &resource->start, &resource->r_bsh);
	if (error)
		return error;

	resource->end = resource->start + (size - 1);
	return 0;
}

/*
 * XXX Mega-kludgerific!  pci_get_bus_and_slot and pci_get_class are
 * defined only for their single purposes in i915drm, in
 * i915_get_bridge_dev and intel_detect_pch.  We can't define them more
 * generally without adapting pci_find_device (and pci_enumerate_bus
 * internally) to pass a cookie through.
 */

static int
pci_kludgey_match_bus0_dev0_func0(const struct pci_attach_args *pa)
{

	/* XXX domain */
	if (pa->pa_bus != 0)
		return 0;
	if (pa->pa_device != 0)
		return 0;
	if (pa->pa_function != 0)
		return 0;

	return 1;
}

struct pci_dev *
pci_get_domain_bus_and_slot(int domain, int bus, int slot)
{
	struct pci_attach_args pa;

	KASSERT(domain == 0);
	KASSERT(bus == 0);
	KASSERT(slot == PCI_DEVFN(0, 0));

	if (!pci_find_device(&pa, &pci_kludgey_match_bus0_dev0_func0))
		return NULL;

	struct pci_dev *const pdev = kmem_zalloc(sizeof(*pdev), KM_SLEEP);
	linux_pci_dev_init(pdev, NULL, NULL, &pa, NBPCI_KLUDGE_GET_MUMBLE);

	return pdev;
}

static int
pci_kludgey_match_isa_bridge(const struct pci_attach_args *pa)
{

	if (PCI_CLASS(pa->pa_class) != PCI_CLASS_BRIDGE)
		return 0;
	if (PCI_SUBCLASS(pa->pa_class) != PCI_SUBCLASS_BRIDGE_ISA)
		return 0;

	return 1;
}

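/*
 * Release a pci_dev obtained from pci_get_domain_bus_and_slot or
 * pci_get_class; the KASSERT enforces that only pci_devs created by
 * those kludgey getters are freed here.
 */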
void
pci_dev_put(struct pci_dev *pdev)
{

	if (pdev == NULL)
		return;

	KASSERT(ISSET(pdev->pd_kludges, NBPCI_KLUDGE_GET_MUMBLE));
	kmem_free(pdev->bus, sizeof(*pdev->bus));
	kmem_free(pdev, sizeof(*pdev));
}

struct pci_dev *		/* XXX i915 kludge */
pci_get_class(uint32_t class_subclass_shifted __unused, struct pci_dev *from)
{
	struct pci_attach_args pa;

	KASSERT(class_subclass_shifted == (PCI_CLASS_BRIDGE_ISA << 8));

	if (from != NULL) {
		pci_dev_put(from);
		return NULL;
	}

	if (!pci_find_device(&pa, &pci_kludgey_match_isa_bridge))
		return NULL;

	struct pci_dev *const pdev = kmem_zalloc(sizeof(*pdev), KM_SLEEP);
	linux_pci_dev_init(pdev, NULL, NULL, &pa, NBPCI_KLUDGE_GET_MUMBLE);

	return pdev;
}

int
pci_dev_present(const struct pci_device_id *ids)
{

	/* XXX implement me -- pci_find_device doesn't pass a cookie */
	return 0;
}

void
pci_unmap_rom(struct pci_dev *pdev, void __pci_rom_iomem *vaddr __unused)
{

	/* XXX Disable the ROM address decoder.  */
	KASSERT(ISSET(pdev->pd_kludges, NBPCI_KLUDGE_MAP_ROM));
	KASSERT(vaddr == pdev->pd_rom_vaddr);
	bus_space_unmap(pdev->pd_rom_bst, pdev->pd_rom_bsh, pdev->pd_rom_size);
	pdev->pd_kludges &= ~NBPCI_KLUDGE_MAP_ROM;
	pdev->pd_rom_vaddr = NULL;
}

/* XXX Whattakludge!  Should move this in sys/arch/.  */
static int
pci_map_rom_md(struct pci_dev *pdev)
{
#if defined(__i386__) || defined(__x86_64__) || defined(__ia64__)
	const bus_addr_t rom_base = 0xc0000;
	const bus_size_t rom_size = 0x20000;
	bus_space_handle_t rom_bsh;
	int error;

	if (PCI_CLASS(pdev->pd_pa.pa_class) != PCI_CLASS_DISPLAY)
		return ENXIO;
	if (PCI_SUBCLASS(pdev->pd_pa.pa_class) != PCI_SUBCLASS_DISPLAY_VGA)
		return ENXIO;
	/* XXX Check whether this is the primary VGA card?  */
	error = bus_space_map(pdev->pd_pa.pa_memt, rom_base, rom_size,
	    (BUS_SPACE_MAP_LINEAR | BUS_SPACE_MAP_PREFETCHABLE), &rom_bsh);
	if (error)
		return ENXIO;

	pdev->pd_rom_bst = pdev->pd_pa.pa_memt;
	pdev->pd_rom_bsh = rom_bsh;
	pdev->pd_rom_size = rom_size;
	pdev->pd_kludges |= NBPCI_KLUDGE_MAP_ROM;

	return 0;
#else
	return ENXIO;
#endif
}

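/*
 * Map the device's expansion ROM via the ROM BAR and locate an x86 code
 * image in it; if that fails, fall back to the legacy 0xc0000 BIOS ROM
 * window where pci_map_rom_md supports it.  On success, return a kernel
 * virtual address for the found image and store its size in *sizep.
 */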
void __pci_rom_iomem *
pci_map_rom(struct pci_dev *pdev, size_t *sizep)
{

	KASSERT(!ISSET(pdev->pd_kludges, NBPCI_KLUDGE_MAP_ROM));

	if (pci_mapreg_map(&pdev->pd_pa, PCI_MAPREG_ROM, PCI_MAPREG_TYPE_ROM,
		(BUS_SPACE_MAP_PREFETCHABLE | BUS_SPACE_MAP_LINEAR),
		&pdev->pd_rom_bst, &pdev->pd_rom_bsh, NULL, &pdev->pd_rom_size)
	    != 0)
		goto fail_mi;
	pdev->pd_kludges |= NBPCI_KLUDGE_MAP_ROM;

	/* XXX This type is obviously wrong in general...  */
	if (pci_find_rom(&pdev->pd_pa, pdev->pd_rom_bst, pdev->pd_rom_bsh,
		pdev->pd_rom_size, PCI_ROM_CODE_TYPE_X86,
		&pdev->pd_rom_found_bsh, &pdev->pd_rom_found_size)) {
		pci_unmap_rom(pdev, NULL);
		goto fail_mi;
	}
	goto success;

fail_mi:
	if (pci_map_rom_md(pdev) != 0)
		goto fail_md;

	/* XXX This type is obviously wrong in general...  */
	if (pci_find_rom(&pdev->pd_pa, pdev->pd_rom_bst, pdev->pd_rom_bsh,
		pdev->pd_rom_size, PCI_ROM_CODE_TYPE_X86,
		&pdev->pd_rom_found_bsh, &pdev->pd_rom_found_size)) {
		pci_unmap_rom(pdev, NULL);
		goto fail_md;
	}

success:
	KASSERT(pdev->pd_rom_found_size <= SIZE_T_MAX);
	*sizep = pdev->pd_rom_found_size;
	pdev->pd_rom_vaddr = bus_space_vaddr(pdev->pd_rom_bst,
	    pdev->pd_rom_found_bsh);
	return pdev->pd_rom_vaddr;

fail_md:
	return NULL;
}

void __pci_rom_iomem *
pci_platform_rom(struct pci_dev *pdev __unused, size_t *sizep)
{

	*sizep = 0;
	return NULL;
}

int
pci_enable_rom(struct pci_dev *pdev)
{
	const pci_chipset_tag_t pc = pdev->pd_pa.pa_pc;
	const pcitag_t tag = pdev->pd_pa.pa_tag;
	pcireg_t addr;
	int s;

	/* XXX Don't do anything if the ROM isn't there.  */

	s = splhigh();
	addr = pci_conf_read(pc, tag, PCI_MAPREG_ROM);
	addr |= PCI_MAPREG_ROM_ENABLE;
	pci_conf_write(pc, tag, PCI_MAPREG_ROM, addr);
	splx(s);

	return 0;
}

void
pci_disable_rom(struct pci_dev *pdev)
{
	const pci_chipset_tag_t pc = pdev->pd_pa.pa_pc;
	const pcitag_t tag = pdev->pd_pa.pa_tag;
	pcireg_t addr;
	int s;

	s = splhigh();
	addr = pci_conf_read(pc, tag, PCI_MAPREG_ROM);
	addr &= ~(pcireg_t)PCI_MAPREG_ROM_ENABLE;
	pci_conf_write(pc, tag, PCI_MAPREG_ROM, addr);
	splx(s);
}

bus_addr_t
pci_resource_start(struct pci_dev *pdev, unsigned i)
{

	KASSERT(i < PCI_NUM_RESOURCES);
	return pdev->pd_resources[i].addr;
}

bus_size_t
pci_resource_len(struct pci_dev *pdev, unsigned i)
{

	KASSERT(i < PCI_NUM_RESOURCES);
	return pdev->pd_resources[i].size;
}

bus_addr_t
pci_resource_end(struct pci_dev *pdev, unsigned i)
{

	return pci_resource_start(pdev, i) + (pci_resource_len(pdev, i) - 1);
}

int
pci_resource_flags(struct pci_dev *pdev, unsigned i)
{

	KASSERT(i < PCI_NUM_RESOURCES);
	return pdev->pd_resources[i].flags;
}

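/*
 * Map BAR i with BUS_SPACE_MAP_LINEAR so that bus_space_vaddr yields a
 * kernel virtual address for it.  Only memory BARs are supported, and
 * the requested size must fit within the BAR.
 */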
void __pci_iomem *
pci_iomap(struct pci_dev *pdev, unsigned i, bus_size_t size)
{
	int error;

	KASSERT(i < PCI_NUM_RESOURCES);
	KASSERT(pdev->pd_resources[i].kva == NULL);

	if (PCI_MAPREG_TYPE(pdev->pd_resources[i].type) != PCI_MAPREG_TYPE_MEM)
		return NULL;
	if (pdev->pd_resources[i].size < size)
		return NULL;
	error = bus_space_map(pdev->pd_pa.pa_memt, pdev->pd_resources[i].addr,
	    size, BUS_SPACE_MAP_LINEAR | pdev->pd_resources[i].flags,
	    &pdev->pd_resources[i].bsh);
	if (error)
		return NULL;
	/* XXX Synchronize with drm_agp_borrow_hook in drm_agpsupport.c.  */
	pdev->pd_resources[i].bst = pdev->pd_pa.pa_memt;
	pdev->pd_resources[i].kva = bus_space_vaddr(pdev->pd_resources[i].bst,
	    pdev->pd_resources[i].bsh);
	pdev->pd_resources[i].mapped = true;

	return pdev->pd_resources[i].kva;
}

void
pci_iounmap(struct pci_dev *pdev, void __pci_iomem *kva)
{
	unsigned i;

	CTASSERT(__arraycount(pdev->pd_resources) == PCI_NUM_RESOURCES);
	for (i = 0; i < PCI_NUM_RESOURCES; i++) {
		if (pdev->pd_resources[i].kva == kva)
			break;
	}
	KASSERT(i < PCI_NUM_RESOURCES);

	pdev->pd_resources[i].kva = NULL;
	pdev->pd_resources[i].mapped = false;
	bus_space_unmap(pdev->pd_resources[i].bst, pdev->pd_resources[i].bsh,
	    pdev->pd_resources[i].size);
}

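/*
 * Capture or restore the device's configuration space using NetBSD's
 * pci_conf_capture and pci_conf_restore; the snapshot is kept in
 * pd_saved_state between the two calls.
 */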
void
pci_save_state(struct pci_dev *pdev)
{

	KASSERT(pdev->pd_saved_state == NULL);
	pdev->pd_saved_state = kmem_alloc(sizeof(*pdev->pd_saved_state),
	    KM_SLEEP);
	pci_conf_capture(pdev->pd_pa.pa_pc, pdev->pd_pa.pa_tag,
	    pdev->pd_saved_state);
}

void
pci_restore_state(struct pci_dev *pdev)
{

	KASSERT(pdev->pd_saved_state != NULL);
	pci_conf_restore(pdev->pd_pa.pa_pc, pdev->pd_pa.pa_tag,
	    pdev->pd_saved_state);
	kmem_free(pdev->pd_saved_state, sizeof(*pdev->pd_saved_state));
	pdev->pd_saved_state = NULL;
}

bool
pci_is_pcie(struct pci_dev *pdev)
{

	return (pci_find_capability(pdev, PCI_CAP_PCIEXPRESS) != 0);
}

bool
pci_dma_supported(struct pci_dev *pdev, uintmax_t mask)
{

	/* XXX Cop-out.  */
	if (mask > DMA_BIT_MASK(32))
		return pci_dma64_available(&pdev->pd_pa);
	else
		return true;
}

bool
pci_is_thunderbolt_attached(struct pci_dev *pdev)
{

	/* XXX Cop-out.  */
	return false;
}

bool
pci_is_root_bus(struct pci_bus *bus)
{

	/* XXX Cop-out. */
	return false;
}

int
pci_domain_nr(struct pci_bus *bus)
{

	return device_unit(bus->pb_dev);
}

/*
 * We explicitly rename pci_enable/disable_device so that you have to
 * review each use of them, since NetBSD's PCI API does _not_ respect
 * our local enablecnt here, but there are different parts of NetBSD
 * that automatically enable/disable like PMF, so you have to decide
 * for each one whether to call it or not.
 */

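/*
 * For example, a hypothetical caller (a sketch, not taken from any
 * in-tree driver) would pair the renamed functions explicitly and undo
 * the enable on its own error paths:
 *
 *	if (linux_pci_enable_device(pdev) != 0)
 *		return -ENODEV;
 *	error = mydrv_attach_hw(pdev);	(hypothetical helper)
 *	if (error) {
 *		linux_pci_disable_device(pdev);
 *		return error;
 *	}
 */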
int
linux_pci_enable_device(struct pci_dev *pdev)
{
	const struct pci_attach_args *pa = &pdev->pd_pa;
	pcireg_t csr;
	int s;

	if (pdev->pd_enablecnt++)
		return 0;

	s = splhigh();
	csr = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_COMMAND_STATUS_REG);
	/* If someone else (firmware) already enabled it, credit them.  */
	if (csr & (PCI_COMMAND_IO_ENABLE|PCI_COMMAND_MEM_ENABLE))
		pdev->pd_enablecnt++;
	csr |= PCI_COMMAND_IO_ENABLE;
	csr |= PCI_COMMAND_MEM_ENABLE;
	pci_conf_write(pa->pa_pc, pa->pa_tag, PCI_COMMAND_STATUS_REG, csr);
	splx(s);

	return 0;
}

void
linux_pci_disable_device(struct pci_dev *pdev)
{
	const struct pci_attach_args *pa = &pdev->pd_pa;
	pcireg_t csr;
	int s;

	if (--pdev->pd_enablecnt)
		return;

	s = splhigh();
	csr = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_COMMAND_STATUS_REG);
	csr &= ~PCI_COMMAND_IO_ENABLE;
	csr &= ~PCI_COMMAND_MEM_ENABLE;
	pci_conf_write(pa->pa_pc, pa->pa_tag, PCI_COMMAND_STATUS_REG, csr);
	splx(s);
}

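/*
 * Tear down everything linux_pci_dev_init and the mapping routines set
 * up: free the pci_bus, unmap the ROM if it is still mapped, and unmap
 * any BARs still mapped by pci_iomap.
 */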
void
linux_pci_dev_destroy(struct pci_dev *pdev)
{
	unsigned i;

	if (pdev->bus != NULL) {
		kmem_free(pdev->bus, sizeof(*pdev->bus));
		pdev->bus = NULL;
	}
	if (ISSET(pdev->pd_kludges, NBPCI_KLUDGE_MAP_ROM)) {
		pci_unmap_rom(pdev, pdev->pd_rom_vaddr);
		pdev->pd_rom_vaddr = 0;
	}
	for (i = 0; i < __arraycount(pdev->pd_resources); i++) {
		if (!pdev->pd_resources[i].mapped)
			continue;
		bus_space_unmap(pdev->pd_resources[i].bst,
		    pdev->pd_resources[i].bsh, pdev->pd_resources[i].size);
	}

	/* There is no way these should be still in use.  */
	KASSERT(pdev->pd_saved_state == NULL);
	KASSERT(pdev->pd_intr_handles == NULL);
}