/*	$NetBSD: linux_pci.c,v 1.17 2021/12/19 10:59:48 riastradh Exp $	*/

/*-
 * Copyright (c) 2013 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Taylor R. Campbell.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#ifdef _KERNEL_OPT
#include "acpica.h"
#include "opt_pci.h"
#endif

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: linux_pci.c,v 1.17 2021/12/19 10:59:48 riastradh Exp $");

#if NACPICA > 0
#include <dev/acpi/acpivar.h>
#include <dev/acpi/acpi_pci.h>
#endif

#include <linux/pci.h>

#include <drm/drm_agp_netbsd.h>

device_t
pci_dev_dev(struct pci_dev *pdev)
{

	return pdev->pd_dev;
}

void
pci_set_drvdata(struct pci_dev *pdev, void *drvdata)
{
	pdev->pd_drvdata = drvdata;
}

void *
pci_get_drvdata(struct pci_dev *pdev)
{
	return pdev->pd_drvdata;
}

void
linux_pci_dev_init(struct pci_dev *pdev, device_t dev, device_t parent,
    const struct pci_attach_args *pa, int kludges)
{
	const uint32_t subsystem_id = pci_conf_read(pa->pa_pc, pa->pa_tag,
	    PCI_SUBSYS_ID_REG);
	unsigned i;

	memset(pdev, 0, sizeof(*pdev)); /* paranoia */

	pdev->pd_pa = *pa;
	pdev->pd_kludges = kludges;
	pdev->pd_rom_vaddr = NULL;
	pdev->pd_dev = dev;
#if (NACPICA > 0)
#ifdef __HAVE_PCI_GET_SEGMENT
	const int seg = pci_get_segment(pa->pa_pc);
#else
	const int seg = 0;
#endif
	pdev->pd_ad = acpi_pcidev_find(seg, pa->pa_bus,
	    pa->pa_device, pa->pa_function);
#else
	pdev->pd_ad = NULL;
#endif
	pdev->pd_saved_state = NULL;
	pdev->pd_intr_handles = NULL;
	pdev->pd_drvdata = NULL;
	pdev->bus = kmem_zalloc(sizeof(*pdev->bus), KM_NOSLEEP);
	pdev->bus->pb_pc = pa->pa_pc;
	pdev->bus->pb_dev = parent;
	pdev->bus->number = pa->pa_bus;
	pdev->devfn = PCI_DEVFN(pa->pa_device, pa->pa_function);
	pdev->vendor = PCI_VENDOR(pa->pa_id);
	pdev->device = PCI_PRODUCT(pa->pa_id);
	pdev->subsystem_vendor = PCI_SUBSYS_VENDOR(subsystem_id);
	pdev->subsystem_device = PCI_SUBSYS_ID(subsystem_id);
	pdev->revision = PCI_REVISION(pa->pa_class);
	pdev->class = __SHIFTOUT(pa->pa_class, 0xffffff00UL); /* ? */

	CTASSERT(__arraycount(pdev->pd_resources) == PCI_NUM_RESOURCES);
	for (i = 0; i < PCI_NUM_RESOURCES; i++) {
		const int reg = PCI_BAR(i);

		pdev->pd_resources[i].type = pci_mapreg_type(pa->pa_pc,
		    pa->pa_tag, reg);
		if (pci_mapreg_info(pa->pa_pc, pa->pa_tag, reg,
			pdev->pd_resources[i].type,
			&pdev->pd_resources[i].addr,
			&pdev->pd_resources[i].size,
			&pdev->pd_resources[i].flags)) {
			pdev->pd_resources[i].addr = 0;
			pdev->pd_resources[i].size = 0;
			pdev->pd_resources[i].flags = 0;
		}
		pdev->pd_resources[i].kva = NULL;
		pdev->pd_resources[i].mapped = false;
	}
}
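
/*
 * Illustrative sketch, not compiled here: a NetBSD DRM attach routine
 * would typically embed a struct pci_dev in its softc and initialize
 * it from the pci_attach_args before handing it to Linux-derived code.
 * The softc layout, names, and kludges value below are hypothetical.
 *
 *	struct example_softc {
 *		device_t	sc_dev;
 *		struct pci_dev	sc_pci_dev;
 *	};
 *
 *	static void
 *	example_attach(device_t parent, device_t self, void *aux)
 *	{
 *		struct example_softc *const sc = device_private(self);
 *		const struct pci_attach_args *const pa = aux;
 *
 *		sc->sc_dev = self;
 *		linux_pci_dev_init(&sc->sc_pci_dev, self, parent, pa, 0);
 *		...
 *	}
 */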

int
pci_find_capability(struct pci_dev *pdev, int cap)
{

	return pci_get_capability(pdev->pd_pa.pa_pc, pdev->pd_pa.pa_tag, cap,
	    NULL, NULL);
}

int
pci_read_config_dword(struct pci_dev *pdev, int reg, uint32_t *valuep)
{

	KASSERT(!ISSET(reg, 3));
	*valuep = pci_conf_read(pdev->pd_pa.pa_pc, pdev->pd_pa.pa_tag, reg);
	return 0;
}

int
pci_read_config_word(struct pci_dev *pdev, int reg, uint16_t *valuep)
{

	KASSERT(!ISSET(reg, 1));
	*valuep = pci_conf_read(pdev->pd_pa.pa_pc, pdev->pd_pa.pa_tag,
	    (reg &~ 2)) >> (8 * (reg & 2));
	return 0;
}

int
pci_read_config_byte(struct pci_dev *pdev, int reg, uint8_t *valuep)
{

	*valuep = pci_conf_read(pdev->pd_pa.pa_pc, pdev->pd_pa.pa_tag,
	    (reg &~ 3)) >> (8 * (reg & 3));
	return 0;
}
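
/*
 * Worked example of the sub-word extraction above, for clarity: a
 * byte read at reg = 0x0d fetches the 32-bit register at 0x0c
 * (reg &~ 3) and shifts it right by 8 * (0x0d & 3) = 8 bits, so the
 * second byte of that dword ends up in the low 8 bits of *valuep.
 */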

int
pci_write_config_dword(struct pci_dev *pdev, int reg, uint32_t value)
{

	KASSERT(!ISSET(reg, 3));
	pci_conf_write(pdev->pd_pa.pa_pc, pdev->pd_pa.pa_tag, reg, value);
	return 0;
}

int
pci_bus_read_config_dword(struct pci_bus *bus, unsigned devfn, int reg,
    uint32_t *valuep)
{
	pcitag_t tag = pci_make_tag(bus->pb_pc, bus->number, PCI_SLOT(devfn),
	    PCI_FUNC(devfn));

	KASSERT(!ISSET(reg, 3));
	*valuep = pci_conf_read(bus->pb_pc, tag, reg);
	return 0;
}

int
pci_bus_read_config_word(struct pci_bus *bus, unsigned devfn, int reg,
    uint16_t *valuep)
{
	pcitag_t tag = pci_make_tag(bus->pb_pc, bus->number, PCI_SLOT(devfn),
	    PCI_FUNC(devfn));

	KASSERT(!ISSET(reg, 1));
	*valuep = pci_conf_read(bus->pb_pc, tag, reg &~ 2) >> (8 * (reg & 2));
	return 0;
}

int
pci_bus_read_config_byte(struct pci_bus *bus, unsigned devfn, int reg,
    uint8_t *valuep)
{
	pcitag_t tag = pci_make_tag(bus->pb_pc, bus->number, PCI_SLOT(devfn),
	    PCI_FUNC(devfn));

	*valuep = pci_conf_read(bus->pb_pc, tag, reg &~ 3) >> (8 * (reg & 3));
	return 0;
}

int
pci_bus_write_config_dword(struct pci_bus *bus, unsigned devfn, int reg,
    uint32_t value)
{
	pcitag_t tag = pci_make_tag(bus->pb_pc, bus->number, PCI_SLOT(devfn),
	    PCI_FUNC(devfn));

	KASSERT(!ISSET(reg, 3));
	pci_conf_write(bus->pb_pc, tag, reg, value);
	return 0;
}

static void
pci_rmw_config(pci_chipset_tag_t pc, pcitag_t tag, int reg, unsigned int bytes,
    uint32_t value)
{
	const uint32_t mask = ~((~0UL) << (8 * bytes));
	const int reg32 = (reg &~ 3);
	const unsigned int shift = (8 * (reg & 3));
	uint32_t value32;

	KASSERT(bytes <= 4);
	KASSERT(!ISSET(value, ~mask));
	value32 = pci_conf_read(pc, tag, reg32);
	value32 &=~ (mask << shift);
	value32 |= (value << shift);
	pci_conf_write(pc, tag, reg32, value32);
}
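
/*
 * Worked example of the read-modify-write above, for clarity: writing
 * the byte 0x40 at reg = 0x3e uses bytes = 1, so mask = 0xff,
 * reg32 = 0x3c, and shift = 16; the dword at 0x3c is read, bits 23:16
 * are cleared and replaced with 0x40, and the dword is written back.
 * The offset is illustrative only.
 */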

int
pci_write_config_word(struct pci_dev *pdev, int reg, uint16_t value)
{

	KASSERT(!ISSET(reg, 1));
	pci_rmw_config(pdev->pd_pa.pa_pc, pdev->pd_pa.pa_tag, reg, 2, value);
	return 0;
}

int
pci_write_config_byte(struct pci_dev *pdev, int reg, uint8_t value)
{

	pci_rmw_config(pdev->pd_pa.pa_pc, pdev->pd_pa.pa_tag, reg, 1, value);
	return 0;
}

int
pci_bus_write_config_word(struct pci_bus *bus, unsigned devfn, int reg,
    uint16_t value)
{
	pcitag_t tag = pci_make_tag(bus->pb_pc, bus->number, PCI_SLOT(devfn),
	    PCI_FUNC(devfn));

	KASSERT(!ISSET(reg, 1));
	pci_rmw_config(bus->pb_pc, tag, reg, 2, value);
	return 0;
}

int
pci_bus_write_config_byte(struct pci_bus *bus, unsigned devfn, int reg,
    uint8_t value)
{
	pcitag_t tag = pci_make_tag(bus->pb_pc, bus->number, PCI_SLOT(devfn),
	    PCI_FUNC(devfn));

	pci_rmw_config(bus->pb_pc, tag, reg, 1, value);
	return 0;
}

int
pci_enable_msi(struct pci_dev *pdev)
{
	const struct pci_attach_args *const pa = &pdev->pd_pa;

	if (pci_msi_alloc_exact(pa, &pdev->pd_intr_handles, 1))
		return -EINVAL;

	pdev->msi_enabled = 1;
	return 0;
}

void
pci_disable_msi(struct pci_dev *pdev)
{
	const struct pci_attach_args *const pa = &pdev->pd_pa;

	if (pdev->pd_intr_handles != NULL) {
		pci_intr_release(pa->pa_pc, pdev->pd_intr_handles, 1);
		pdev->pd_intr_handles = NULL;
	}
	pdev->msi_enabled = 0;
}

void
pci_set_master(struct pci_dev *pdev)
{
	pcireg_t csr;

	csr = pci_conf_read(pdev->pd_pa.pa_pc, pdev->pd_pa.pa_tag,
	    PCI_COMMAND_STATUS_REG);
	csr |= PCI_COMMAND_MASTER_ENABLE;
	pci_conf_write(pdev->pd_pa.pa_pc, pdev->pd_pa.pa_tag,
	    PCI_COMMAND_STATUS_REG, csr);
}

void
pci_clear_master(struct pci_dev *pdev)
{
	pcireg_t csr;

	csr = pci_conf_read(pdev->pd_pa.pa_pc, pdev->pd_pa.pa_tag,
	    PCI_COMMAND_STATUS_REG);
	csr &= ~(pcireg_t)PCI_COMMAND_MASTER_ENABLE;
	pci_conf_write(pdev->pd_pa.pa_pc, pdev->pd_pa.pa_tag,
	    PCI_COMMAND_STATUS_REG, csr);
}

bus_addr_t
pcibios_align_resource(void *p, const struct resource *resource,
    bus_addr_t addr, bus_size_t size)
{
	panic("pcibios_align_resource has accessed unaligned neurons!");
}

int
pci_bus_alloc_resource(struct pci_bus *bus, struct resource *resource,
    bus_size_t size, bus_size_t align, bus_addr_t start, int type __unused,
    bus_addr_t (*align_fn)(void *, const struct resource *, bus_addr_t,
	bus_size_t) __unused,
    struct pci_dev *pdev)
{
	const struct pci_attach_args *const pa = &pdev->pd_pa;
	bus_space_tag_t bst;
	int error;

	switch (resource->flags) {
	case IORESOURCE_MEM:
		bst = pa->pa_memt;
		break;

	case IORESOURCE_IO:
		bst = pa->pa_iot;
		break;

	default:
		panic("I don't know what kind of resource you want!");
	}

	resource->r_bst = bst;
	error = bus_space_alloc(bst, start, __type_max(bus_addr_t),
	    size, align, 0, 0, &resource->start, &resource->r_bsh);
	if (error)
		return error;

	/* bus_space_alloc wrote the allocated base into resource->start.  */
	resource->end = resource->start + (size - 1);
	return 0;
}

/*
 * XXX Mega-kludgerific!  pci_get_bus_and_slot and pci_get_class are
 * defined only for their single purposes in i915drm, in
 * i915_get_bridge_dev and intel_detect_pch.  We can't define them more
 * generally without adapting pci_find_device (and pci_enumerate_bus
 * internally) to pass a cookie through.
 */
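
/*
 * Illustrative use from the i915 callers mentioned above (hypothetical
 * fragment, not compiled here):
 *
 *	bridge = pci_get_domain_bus_and_slot(0, 0, PCI_DEVFN(0, 0));
 *	...
 *	pch = pci_get_class(PCI_CLASS_BRIDGE_ISA << 8, NULL);
 *	...
 *	pci_dev_put(pch);
 *	pci_dev_put(bridge);
 */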

static int
pci_kludgey_match_bus0_dev0_func0(const struct pci_attach_args *pa)
{

	/* XXX domain */
	if (pa->pa_bus != 0)
		return 0;
	if (pa->pa_device != 0)
		return 0;
	if (pa->pa_function != 0)
		return 0;

	return 1;
}

struct pci_dev *
pci_get_domain_bus_and_slot(int domain, int bus, int slot)
{
	struct pci_attach_args pa;

	KASSERT(domain == 0);
	KASSERT(bus == 0);
	KASSERT(slot == PCI_DEVFN(0, 0));

	if (!pci_find_device(&pa, &pci_kludgey_match_bus0_dev0_func0))
		return NULL;

	struct pci_dev *const pdev = kmem_zalloc(sizeof(*pdev), KM_SLEEP);
	linux_pci_dev_init(pdev, NULL, NULL, &pa, NBPCI_KLUDGE_GET_MUMBLE);

	return pdev;
}

static int
pci_kludgey_match_isa_bridge(const struct pci_attach_args *pa)
{

	if (PCI_CLASS(pa->pa_class) != PCI_CLASS_BRIDGE)
		return 0;
	if (PCI_SUBCLASS(pa->pa_class) != PCI_SUBCLASS_BRIDGE_ISA)
		return 0;

	return 1;
}

void
pci_dev_put(struct pci_dev *pdev)
{

	if (pdev == NULL)
		return;

	KASSERT(ISSET(pdev->pd_kludges, NBPCI_KLUDGE_GET_MUMBLE));
	kmem_free(pdev->bus, sizeof(*pdev->bus));
	kmem_free(pdev, sizeof(*pdev));
}

struct pci_dev *		/* XXX i915 kludge */
pci_get_class(uint32_t class_subclass_shifted __unused, struct pci_dev *from)
{
	struct pci_attach_args pa;

	KASSERT(class_subclass_shifted == (PCI_CLASS_BRIDGE_ISA << 8));

	if (from != NULL) {
		pci_dev_put(from);
		return NULL;
	}

	if (!pci_find_device(&pa, &pci_kludgey_match_isa_bridge))
		return NULL;

	struct pci_dev *const pdev = kmem_zalloc(sizeof(*pdev), KM_SLEEP);
	linux_pci_dev_init(pdev, NULL, NULL, &pa, NBPCI_KLUDGE_GET_MUMBLE);

	return pdev;
}

void
pci_unmap_rom(struct pci_dev *pdev, void __pci_rom_iomem *vaddr __unused)
{

	/* XXX Disable the ROM address decoder.  */
	KASSERT(ISSET(pdev->pd_kludges, NBPCI_KLUDGE_MAP_ROM));
	KASSERT(vaddr == pdev->pd_rom_vaddr);
	bus_space_unmap(pdev->pd_rom_bst, pdev->pd_rom_bsh, pdev->pd_rom_size);
	pdev->pd_kludges &= ~NBPCI_KLUDGE_MAP_ROM;
	pdev->pd_rom_vaddr = NULL;
}

/* XXX Whattakludge!  Should move this in sys/arch/.  */
static int
pci_map_rom_md(struct pci_dev *pdev)
{
#if defined(__i386__) || defined(__x86_64__) || defined(__ia64__)
	const bus_addr_t rom_base = 0xc0000;
	const bus_size_t rom_size = 0x20000;
	bus_space_handle_t rom_bsh;
	int error;

	if (PCI_CLASS(pdev->pd_pa.pa_class) != PCI_CLASS_DISPLAY)
		return ENXIO;
	if (PCI_SUBCLASS(pdev->pd_pa.pa_class) != PCI_SUBCLASS_DISPLAY_VGA)
		return ENXIO;
	/* XXX Check whether this is the primary VGA card?  */
	error = bus_space_map(pdev->pd_pa.pa_memt, rom_base, rom_size,
	    (BUS_SPACE_MAP_LINEAR | BUS_SPACE_MAP_PREFETCHABLE), &rom_bsh);
	if (error)
		return ENXIO;

	pdev->pd_rom_bst = pdev->pd_pa.pa_memt;
	pdev->pd_rom_bsh = rom_bsh;
	pdev->pd_rom_size = rom_size;
	pdev->pd_kludges |= NBPCI_KLUDGE_MAP_ROM;

	return 0;
#else
	return ENXIO;
#endif
}

void __pci_rom_iomem *
pci_map_rom(struct pci_dev *pdev, size_t *sizep)
{

	KASSERT(!ISSET(pdev->pd_kludges, NBPCI_KLUDGE_MAP_ROM));

	if (pci_mapreg_map(&pdev->pd_pa, PCI_MAPREG_ROM, PCI_MAPREG_TYPE_ROM,
		(BUS_SPACE_MAP_PREFETCHABLE | BUS_SPACE_MAP_LINEAR),
		&pdev->pd_rom_bst, &pdev->pd_rom_bsh, NULL, &pdev->pd_rom_size)
	    != 0)
		goto fail_mi;
	pdev->pd_kludges |= NBPCI_KLUDGE_MAP_ROM;

	/* XXX This type is obviously wrong in general...  */
	if (pci_find_rom(&pdev->pd_pa, pdev->pd_rom_bst, pdev->pd_rom_bsh,
		pdev->pd_rom_size, PCI_ROM_CODE_TYPE_X86,
		&pdev->pd_rom_found_bsh, &pdev->pd_rom_found_size)) {
		pci_unmap_rom(pdev, NULL);
		goto fail_mi;
	}
	goto success;

fail_mi:
	if (pci_map_rom_md(pdev) != 0)
		goto fail_md;

	/* XXX This type is obviously wrong in general...  */
	if (pci_find_rom(&pdev->pd_pa, pdev->pd_rom_bst, pdev->pd_rom_bsh,
		pdev->pd_rom_size, PCI_ROM_CODE_TYPE_X86,
		&pdev->pd_rom_found_bsh, &pdev->pd_rom_found_size)) {
		pci_unmap_rom(pdev, NULL);
		goto fail_md;
	}

success:
	KASSERT(pdev->pd_rom_found_size <= SIZE_T_MAX);
	*sizep = pdev->pd_rom_found_size;
	pdev->pd_rom_vaddr = bus_space_vaddr(pdev->pd_rom_bst,
	    pdev->pd_rom_found_bsh);
	return pdev->pd_rom_vaddr;

fail_md:
	return NULL;
}
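
/*
 * Illustrative pairing of the ROM helpers (hypothetical fragment, not
 * compiled here); pci_map_rom returns NULL on failure and otherwise
 * must be balanced by pci_unmap_rom:
 *
 *	size_t romsize;
 *	void __pci_rom_iomem *rom = pci_map_rom(pdev, &romsize);
 *
 *	if (rom != NULL) {
 *		... parse romsize bytes of ROM image ...
 *		pci_unmap_rom(pdev, rom);
 *	}
 */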

void __pci_rom_iomem *
pci_platform_rom(struct pci_dev *pdev __unused, size_t *sizep)
{

	*sizep = 0;
	return NULL;
}

int
pci_enable_rom(struct pci_dev *pdev)
{
	const pci_chipset_tag_t pc = pdev->pd_pa.pa_pc;
	const pcitag_t tag = pdev->pd_pa.pa_tag;
	pcireg_t addr;
	int s;

	/* XXX Don't do anything if the ROM isn't there.  */

	s = splhigh();
	addr = pci_conf_read(pc, tag, PCI_MAPREG_ROM);
	addr |= PCI_MAPREG_ROM_ENABLE;
	pci_conf_write(pc, tag, PCI_MAPREG_ROM, addr);
	splx(s);

	return 0;
}

void
pci_disable_rom(struct pci_dev *pdev)
{
	const pci_chipset_tag_t pc = pdev->pd_pa.pa_pc;
	const pcitag_t tag = pdev->pd_pa.pa_tag;
	pcireg_t addr;
	int s;

	s = splhigh();
	addr = pci_conf_read(pc, tag, PCI_MAPREG_ROM);
	addr &= ~(pcireg_t)PCI_MAPREG_ROM_ENABLE;
	pci_conf_write(pc, tag, PCI_MAPREG_ROM, addr);
	splx(s);
}

bus_addr_t
pci_resource_start(struct pci_dev *pdev, unsigned i)
{

	KASSERT(i < PCI_NUM_RESOURCES);
	return pdev->pd_resources[i].addr;
}

bus_size_t
pci_resource_len(struct pci_dev *pdev, unsigned i)
{

	KASSERT(i < PCI_NUM_RESOURCES);
	return pdev->pd_resources[i].size;
}

bus_addr_t
pci_resource_end(struct pci_dev *pdev, unsigned i)
{

	return pci_resource_start(pdev, i) + (pci_resource_len(pdev, i) - 1);
}

int
pci_resource_flags(struct pci_dev *pdev, unsigned i)
{

	KASSERT(i < PCI_NUM_RESOURCES);
	return pdev->pd_resources[i].flags;
}

void __pci_iomem *
pci_iomap(struct pci_dev *pdev, unsigned i, bus_size_t size)
{
	int error;

	KASSERT(i < PCI_NUM_RESOURCES);
	KASSERT(pdev->pd_resources[i].kva == NULL);

	if (PCI_MAPREG_TYPE(pdev->pd_resources[i].type) != PCI_MAPREG_TYPE_MEM)
		return NULL;
	if (pdev->pd_resources[i].size < size)
		return NULL;
	error = bus_space_map(pdev->pd_pa.pa_memt, pdev->pd_resources[i].addr,
	    size, BUS_SPACE_MAP_LINEAR | pdev->pd_resources[i].flags,
	    &pdev->pd_resources[i].bsh);
	if (error)
		return NULL;
	/* XXX Synchronize with drm_agp_borrow_hook in drm_agpsupport.c.  */
	pdev->pd_resources[i].bst = pdev->pd_pa.pa_memt;
	pdev->pd_resources[i].kva = bus_space_vaddr(pdev->pd_resources[i].bst,
	    pdev->pd_resources[i].bsh);
	pdev->pd_resources[i].mapped = true;

	return pdev->pd_resources[i].kva;
}
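
/*
 * Illustrative pairing of pci_iomap with pci_iounmap (hypothetical
 * fragment, not compiled here); the BAR index and size would come
 * from the resource accessors above:
 *
 *	void __pci_iomem *regs;
 *
 *	regs = pci_iomap(pdev, 0, pci_resource_len(pdev, 0));
 *	if (regs == NULL)
 *		return -ENOMEM;
 *	...
 *	pci_iounmap(pdev, regs);
 */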

void
pci_iounmap(struct pci_dev *pdev, void __pci_iomem *kva)
{
	unsigned i;

	CTASSERT(__arraycount(pdev->pd_resources) == PCI_NUM_RESOURCES);
	for (i = 0; i < PCI_NUM_RESOURCES; i++) {
		if (pdev->pd_resources[i].kva == kva)
			break;
	}
	KASSERT(i < PCI_NUM_RESOURCES);

	pdev->pd_resources[i].kva = NULL;
	pdev->pd_resources[i].mapped = false;
	bus_space_unmap(pdev->pd_resources[i].bst, pdev->pd_resources[i].bsh,
	    pdev->pd_resources[i].size);
}

void
pci_save_state(struct pci_dev *pdev)
{

	KASSERT(pdev->pd_saved_state == NULL);
	pdev->pd_saved_state = kmem_alloc(sizeof(*pdev->pd_saved_state),
	    KM_SLEEP);
	pci_conf_capture(pdev->pd_pa.pa_pc, pdev->pd_pa.pa_tag,
	    pdev->pd_saved_state);
}

void
pci_restore_state(struct pci_dev *pdev)
{

	KASSERT(pdev->pd_saved_state != NULL);
	pci_conf_restore(pdev->pd_pa.pa_pc, pdev->pd_pa.pa_tag,
	    pdev->pd_saved_state);
	kmem_free(pdev->pd_saved_state, sizeof(*pdev->pd_saved_state));
	pdev->pd_saved_state = NULL;
}
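
/*
 * Illustrative pairing of the state helpers across a suspend/resume
 * cycle (hypothetical fragment, not compiled here); the calls must
 * balance, since pci_save_state asserts that no state is already
 * captured and pci_restore_state frees what was captured:
 *
 *	pci_save_state(pdev);		(on the way down)
 *	...
 *	pci_restore_state(pdev);	(on the way back up)
 */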

bool
pci_is_pcie(struct pci_dev *pdev)
{

	return (pci_find_capability(pdev, PCI_CAP_PCIEXPRESS) != 0);
}

bool
pci_dma_supported(struct pci_dev *pdev, uintmax_t mask)
{

	/* XXX Cop-out.  */
	if (mask > DMA_BIT_MASK(32))
		return pci_dma64_available(&pdev->pd_pa);
	else
		return true;
}

bool
pci_is_thunderbolt_attached(struct pci_dev *pdev)
{

	/* XXX Cop-out.  */
	return false;
}

bool
pci_is_root_bus(struct pci_bus *bus)
{

	/* XXX Cop-out. */
	return false;
}

int
pci_domain_nr(struct pci_bus *bus)
{

	return device_unit(bus->pb_dev);
}

/*
 * We explicitly rename pci_enable/disable_device so that you have to
 * review each use of them, since NetBSD's PCI API does _not_ respect
 * our local enablecnt here, but there are different parts of NetBSD
 * that automatically enable/disable like PMF, so you have to decide
 * for each one whether to call it or not.
 */
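
/*
 * Illustrative call pattern under that convention (hypothetical
 * fragment, not compiled here): a caller that decides it does need the
 * explicit enable brackets its use of the device and keeps the enable
 * count balanced:
 *
 *	if (linux_pci_enable_device(pdev))
 *		return -EIO;
 *	...
 *	linux_pci_disable_device(pdev);
 */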

int
linux_pci_enable_device(struct pci_dev *pdev)
{
	const struct pci_attach_args *pa = &pdev->pd_pa;
	pcireg_t csr;
	int s;

	if (pdev->pd_enablecnt++)
		return 0;

	s = splhigh();
	csr = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_COMMAND_STATUS_REG);
	/* If someone else (firmware) already enabled it, credit them.  */
	if (csr & (PCI_COMMAND_IO_ENABLE|PCI_COMMAND_MEM_ENABLE))
		pdev->pd_enablecnt++;
	csr |= PCI_COMMAND_IO_ENABLE;
	csr |= PCI_COMMAND_MEM_ENABLE;
	pci_conf_write(pa->pa_pc, pa->pa_tag, PCI_COMMAND_STATUS_REG, csr);
	splx(s);

	return 0;
}

void
linux_pci_disable_device(struct pci_dev *pdev)
{
	const struct pci_attach_args *pa = &pdev->pd_pa;
	pcireg_t csr;
	int s;

	if (--pdev->pd_enablecnt)
		return;

	s = splhigh();
	csr = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_COMMAND_STATUS_REG);
	csr &= ~PCI_COMMAND_IO_ENABLE;
	csr &= ~PCI_COMMAND_MEM_ENABLE;
	pci_conf_write(pa->pa_pc, pa->pa_tag, PCI_COMMAND_STATUS_REG, csr);
	splx(s);
}

void
linux_pci_dev_destroy(struct pci_dev *pdev)
{
	unsigned i;

	if (pdev->bus != NULL) {
		kmem_free(pdev->bus, sizeof(*pdev->bus));
		pdev->bus = NULL;
	}
	if (ISSET(pdev->pd_kludges, NBPCI_KLUDGE_MAP_ROM)) {
		pci_unmap_rom(pdev, pdev->pd_rom_vaddr);
		pdev->pd_rom_vaddr = NULL;
	}
	for (i = 0; i < __arraycount(pdev->pd_resources); i++) {
		if (!pdev->pd_resources[i].mapped)
			continue;
		bus_space_unmap(pdev->pd_resources[i].bst,
		    pdev->pd_resources[i].bsh, pdev->pd_resources[i].size);
	}

	/* There is no way these should be still in use.  */
	KASSERT(pdev->pd_saved_state == NULL);
	KASSERT(pdev->pd_intr_handles == NULL);
}

bool
dev_is_pci(struct pci_dev *pdev)
{
	return pdev != NULL;
}