xref: /netbsd-src/sys/external/bsd/drm2/linux/linux_pci.c (revision 0e2e28bced52bda3788c857106bde6c44d2df3b8)
1 /*	$NetBSD: linux_pci.c,v 1.28 2024/05/19 17:36:08 riastradh Exp $	*/
2 
3 /*-
4  * Copyright (c) 2013 The NetBSD Foundation, Inc.
5  * All rights reserved.
6  *
7  * This code is derived from software contributed to The NetBSD Foundation
8  * by Taylor R. Campbell.
9  *
10  * Redistribution and use in source and binary forms, with or without
11  * modification, are permitted provided that the following conditions
12  * are met:
13  * 1. Redistributions of source code must retain the above copyright
14  *    notice, this list of conditions and the following disclaimer.
15  * 2. Redistributions in binary form must reproduce the above copyright
16  *    notice, this list of conditions and the following disclaimer in the
17  *    documentation and/or other materials provided with the distribution.
18  *
19  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
20  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
21  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
22  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
23  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
24  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
25  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
26  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
27  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
28  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
29  * POSSIBILITY OF SUCH DAMAGE.
30  */
31 
32 #ifdef _KERNEL_OPT
33 #include "acpica.h"
34 #include "opt_pci.h"
35 #endif
36 
37 #include <sys/cdefs.h>
38 __KERNEL_RCSID(0, "$NetBSD: linux_pci.c,v 1.28 2024/05/19 17:36:08 riastradh Exp $");
39 
40 #if NACPICA > 0
41 #include <dev/acpi/acpivar.h>
42 #include <dev/acpi/acpi_pci.h>
43 #endif
44 
45 #include <linux/pci.h>
46 
47 #include <drm/drm_agp_netbsd.h>
48 
49 device_t
50 pci_dev_dev(struct pci_dev *pdev)
51 {
52 
53 	return pdev->pd_dev;
54 }
55 
56 void
57 pci_set_drvdata(struct pci_dev *pdev, void *drvdata)
58 {
59 	pdev->pd_drvdata = drvdata;
60 }
61 
62 void *
63 pci_get_drvdata(struct pci_dev *pdev)
64 {
65 	return pdev->pd_drvdata;
66 }
67 
/*
 * Return a printable name for the device.  Linux uses the
 * domain:bus:dev.fn form; we hand back the autoconf instance name.
 */
const char *
pci_name(struct pci_dev *pdev)
{

	/* XXX not sure this has the right format */
	return device_xname(pci_dev_dev(pdev));
}
75 
/*
 * Setup enough of a parent that we can access config space.
 * This is gross and grovels pci(4) and ppb(4) internals.
 *
 * Returns a kmem-allocated fake struct pci_dev describing the ppb(4)
 * bridge above `parent', or NULL when the hierarchy is not
 * pci(4)-under-ppb(4).  linux_pci_dev_init stores the result in
 * pdev->bus->self; linux_pci_dev_destroy frees it.
 */
static struct pci_dev *
alloc_fake_parent_device(device_t parent, const struct pci_attach_args *pa)
{

	if (parent == NULL || !device_is_a(parent, "pci"))
		return NULL;

	device_t pparent = device_parent(parent);
	if (pparent == NULL || !device_is_a(pparent, "ppb"))
		return NULL;

	/* Reach into the pci(4)/ppb(4) softc internals (see note above). */
	struct pci_softc *pcisc = device_private(parent);
	struct ppb_softc *ppbsc = device_private(pparent);

	struct pci_dev *parentdev = kmem_zalloc(sizeof(*parentdev), KM_SLEEP);

	/* Copy this device's pci_attach_args{} as a base-line. */
	struct pci_attach_args *npa = &parentdev->pd_pa;
	*npa = *pa;

	/* Now update with stuff found in parent. */
	npa->pa_iot = pcisc->sc_iot;
	npa->pa_memt = pcisc->sc_memt;
	npa->pa_dmat = pcisc->sc_dmat;
	npa->pa_dmat64 = pcisc->sc_dmat64;
	npa->pa_pc = pcisc->sc_pc;
	npa->pa_flags = 0;	/* XXX? */

	/* Copy the parent tag, and read some info about it. */
	npa->pa_tag = ppbsc->sc_tag;
	pcireg_t id = pci_conf_read(npa->pa_pc, npa->pa_tag, PCI_ID_REG);
	pcireg_t subid = pci_conf_read(npa->pa_pc, npa->pa_tag,
	    PCI_SUBSYS_ID_REG);
	pcireg_t class = pci_conf_read(npa->pa_pc, npa->pa_tag, PCI_CLASS_REG);

	/*
	 * Fill in as much of pci_attach_args and pci_dev as reasonably possible.
	 * Most of this is not used currently.
	 */
	int bus, device, function;
	pci_decompose_tag(npa->pa_pc, npa->pa_tag, &bus, &device, &function);
	npa->pa_device = device;
	npa->pa_function = function;
	npa->pa_bus = bus;
	npa->pa_id = id;
	npa->pa_class = class;
	npa->pa_intrswiz = pcisc->sc_intrswiz;
	npa->pa_intrtag = pcisc->sc_intrtag;
	npa->pa_intrpin = PCI_INTERRUPT_PIN_NONE;

	parentdev->pd_dev = parent;

	parentdev->bus = NULL;
	parentdev->devfn = device << 3 | function;
	parentdev->vendor = PCI_VENDOR(id);
	parentdev->device = PCI_PRODUCT(id);
	parentdev->subsystem_vendor = PCI_SUBSYS_VENDOR(subid);
	parentdev->subsystem_device = PCI_SUBSYS_ID(subid);
	parentdev->revision = PCI_REVISION(class);
	parentdev->class = __SHIFTOUT(class, 0xffffff00UL); /* ? */

	return parentdev;
}
143 
/*
 * Initialize the Linux-style pci_dev shim `pdev' from NetBSD attach
 * info.  `dev' is the autoconf device for the PCI function (NULL for
 * the pci_get_* kludges), `parent' the pci(4) bus instance, `pa' the
 * attachment args to copy, and `kludges' records NBPCI_KLUDGE_* flags
 * describing how this pdev was created.
 */
void
linux_pci_dev_init(struct pci_dev *pdev, device_t dev, device_t parent,
    const struct pci_attach_args *pa, int kludges)
{
	const uint32_t subsystem_id = pci_conf_read(pa->pa_pc, pa->pa_tag,
	    PCI_SUBSYS_ID_REG);
	unsigned i;

	memset(pdev, 0, sizeof(*pdev)); /* paranoia */

	pdev->pd_pa = *pa;
	pdev->pd_kludges = kludges;
	pdev->pd_rom_vaddr = NULL;
	pdev->pd_dev = dev;
#if (NACPICA > 0)
	/* Associate the matching ACPI device node, if any. */
	const int seg = pci_get_segment(pa->pa_pc);
	pdev->pd_ad = acpi_pcidev_find(seg, pa->pa_bus,
	    pa->pa_device, pa->pa_function);
#else
	pdev->pd_ad = NULL;
#endif
	pdev->pd_saved_state = NULL;
	pdev->pd_intr_handles = NULL;
	pdev->pd_drvdata = NULL;
	/*
	 * NOTE(review): KM_NOSLEEP may return NULL, which would crash on
	 * the dereferences immediately below -- confirm callers tolerate
	 * this or whether KM_SLEEP is safe here.
	 */
	pdev->bus = kmem_zalloc(sizeof(*pdev->bus), KM_NOSLEEP);
	pdev->bus->pb_pc = pa->pa_pc;
	pdev->bus->pb_dev = parent;
	pdev->bus->number = pa->pa_bus;
	/*
	 * NetBSD doesn't have an easy "am I PCIe" or "give me PCIe speed
	 * from capability" function, but we already emulate the Linux
	 * versions that do.
	 */
	if (pci_is_pcie(pdev)) {
		pdev->bus->max_bus_speed = pcie_get_speed_cap(pdev);
	} else {
		/* XXX: Do AGP/PCI-X, etc.? */
		pdev->bus->max_bus_speed = PCI_SPEED_UNKNOWN;
	}
	pdev->bus->self = alloc_fake_parent_device(parent, pa);
	pdev->devfn = PCI_DEVFN(pa->pa_device, pa->pa_function);
	pdev->vendor = PCI_VENDOR(pa->pa_id);
	pdev->device = PCI_PRODUCT(pa->pa_id);
	pdev->subsystem_vendor = PCI_SUBSYS_VENDOR(subsystem_id);
	pdev->subsystem_device = PCI_SUBSYS_ID(subsystem_id);
	pdev->revision = PCI_REVISION(pa->pa_class);
	pdev->class = __SHIFTOUT(pa->pa_class, 0xffffff00UL); /* ? */

	/* Record each BAR; pci_mapreg_info failure leaves it zeroed. */
	CTASSERT(__arraycount(pdev->pd_resources) == PCI_NUM_RESOURCES);
	for (i = 0; i < PCI_NUM_RESOURCES; i++) {
		const int reg = PCI_BAR(i);

		pdev->pd_resources[i].type = pci_mapreg_type(pa->pa_pc,
		    pa->pa_tag, reg);
		if (pci_mapreg_info(pa->pa_pc, pa->pa_tag, reg,
			pdev->pd_resources[i].type,
			&pdev->pd_resources[i].addr,
			&pdev->pd_resources[i].size,
			&pdev->pd_resources[i].flags)) {
			pdev->pd_resources[i].addr = 0;
			pdev->pd_resources[i].size = 0;
			pdev->pd_resources[i].flags = 0;
		}
		pdev->pd_resources[i].kva = NULL;
		pdev->pd_resources[i].mapped = false;
	}
}
211 
212 int
213 pci_find_capability(struct pci_dev *pdev, int cap)
214 {
215 
216 	return pci_get_capability(pdev->pd_pa.pa_pc, pdev->pd_pa.pa_tag, cap,
217 	    NULL, NULL);
218 }
219 
220 int
221 pci_read_config_dword(struct pci_dev *pdev, int reg, uint32_t *valuep)
222 {
223 
224 	KASSERT(!ISSET(reg, 3));
225 	*valuep = pci_conf_read(pdev->pd_pa.pa_pc, pdev->pd_pa.pa_tag, reg);
226 	return 0;
227 }
228 
229 int
230 pci_read_config_word(struct pci_dev *pdev, int reg, uint16_t *valuep)
231 {
232 
233 	KASSERT(!ISSET(reg, 1));
234 	*valuep = pci_conf_read(pdev->pd_pa.pa_pc, pdev->pd_pa.pa_tag,
235 	    (reg &~ 2)) >> (8 * (reg & 2));
236 	return 0;
237 }
238 
239 int
240 pci_read_config_byte(struct pci_dev *pdev, int reg, uint8_t *valuep)
241 {
242 
243 	*valuep = pci_conf_read(pdev->pd_pa.pa_pc, pdev->pd_pa.pa_tag,
244 	    (reg &~ 3)) >> (8 * (reg & 3));
245 	return 0;
246 }
247 
248 int
249 pci_write_config_dword(struct pci_dev *pdev, int reg, uint32_t value)
250 {
251 
252 	KASSERT(!ISSET(reg, 3));
253 	pci_conf_write(pdev->pd_pa.pa_pc, pdev->pd_pa.pa_tag, reg, value);
254 	return 0;
255 }
256 
257 int
258 pci_bus_read_config_dword(struct pci_bus *bus, unsigned devfn, int reg,
259     uint32_t *valuep)
260 {
261 	pcitag_t tag = pci_make_tag(bus->pb_pc, bus->number, PCI_SLOT(devfn),
262 	    PCI_FUNC(devfn));
263 
264 	KASSERT(!ISSET(reg, 1));
265 	*valuep = pci_conf_read(bus->pb_pc, tag, reg & ~3) >> (8 * (reg & 3));
266 	return 0;
267 }
268 
269 int
270 pci_bus_read_config_word(struct pci_bus *bus, unsigned devfn, int reg,
271     uint16_t *valuep)
272 {
273 	pcitag_t tag = pci_make_tag(bus->pb_pc, bus->number, PCI_SLOT(devfn),
274 	    PCI_FUNC(devfn));
275 
276 	KASSERT(!ISSET(reg, 1));
277 	*valuep = pci_conf_read(bus->pb_pc, tag, reg &~ 2) >> (8 * (reg & 2));
278 	return 0;
279 }
280 
281 int
282 pci_bus_read_config_byte(struct pci_bus *bus, unsigned devfn, int reg,
283     uint8_t *valuep)
284 {
285 	pcitag_t tag = pci_make_tag(bus->pb_pc, bus->number, PCI_SLOT(devfn),
286 	    PCI_FUNC(devfn));
287 
288 	*valuep = pci_conf_read(bus->pb_pc, tag, reg &~ 3) >> (8 * (reg & 3));
289 	return 0;
290 }
291 
292 int
293 pci_bus_write_config_dword(struct pci_bus *bus, unsigned devfn, int reg,
294     uint32_t value)
295 {
296 	pcitag_t tag = pci_make_tag(bus->pb_pc, bus->number, PCI_SLOT(devfn),
297 	    PCI_FUNC(devfn));
298 
299 	KASSERT(!ISSET(reg, 3));
300 	pci_conf_write(bus->pb_pc, tag, reg, value);
301 	return 0;
302 }
303 
304 static void
305 pci_rmw_config(pci_chipset_tag_t pc, pcitag_t tag, int reg, unsigned int bytes,
306     uint32_t value)
307 {
308 	const uint32_t mask = ~((~0UL) << (8 * bytes));
309 	const int reg32 = (reg &~ 3);
310 	const unsigned int shift = (8 * (reg & 3));
311 	uint32_t value32;
312 
313 	KASSERT(bytes <= 4);
314 	KASSERT(!ISSET(value, ~mask));
315 	value32 = pci_conf_read(pc, tag, reg32);
316 	value32 &=~ (mask << shift);
317 	value32 |= (value << shift);
318 	pci_conf_write(pc, tag, reg32, value32);
319 }
320 
321 int
322 pci_write_config_word(struct pci_dev *pdev, int reg, uint16_t value)
323 {
324 
325 	KASSERT(!ISSET(reg, 1));
326 	pci_rmw_config(pdev->pd_pa.pa_pc, pdev->pd_pa.pa_tag, reg, 2, value);
327 	return 0;
328 }
329 
330 int
331 pci_write_config_byte(struct pci_dev *pdev, int reg, uint8_t value)
332 {
333 
334 	pci_rmw_config(pdev->pd_pa.pa_pc, pdev->pd_pa.pa_tag, reg, 1, value);
335 	return 0;
336 }
337 
338 int
339 pci_bus_write_config_word(struct pci_bus *bus, unsigned devfn, int reg,
340     uint16_t value)
341 {
342 	pcitag_t tag = pci_make_tag(bus->pb_pc, bus->number, PCI_SLOT(devfn),
343 	    PCI_FUNC(devfn));
344 
345 	KASSERT(!ISSET(reg, 1));
346 	pci_rmw_config(bus->pb_pc, tag, reg, 2, value);
347 	return 0;
348 }
349 
350 int
351 pci_bus_write_config_byte(struct pci_bus *bus, unsigned devfn, int reg,
352     uint8_t value)
353 {
354 	pcitag_t tag = pci_make_tag(bus->pb_pc, bus->number, PCI_SLOT(devfn),
355 	    PCI_FUNC(devfn));
356 
357 	pci_rmw_config(bus->pb_pc, tag, reg, 1, value);
358 	return 0;
359 }
360 
361 int
362 pci_enable_msi(struct pci_dev *pdev)
363 {
364 	const struct pci_attach_args *const pa = &pdev->pd_pa;
365 
366 	if (pci_msi_alloc_exact(pa, &pdev->pd_intr_handles, 1))
367 		return -EINVAL;
368 
369 	pdev->msi_enabled = 1;
370 	return 0;
371 }
372 
373 void
374 pci_disable_msi(struct pci_dev *pdev __unused)
375 {
376 	const struct pci_attach_args *const pa = &pdev->pd_pa;
377 
378 	if (pdev->pd_intr_handles != NULL) {
379 		pci_intr_release(pa->pa_pc, pdev->pd_intr_handles, 1);
380 		pdev->pd_intr_handles = NULL;
381 	}
382 	pdev->msi_enabled = 0;
383 }
384 
385 void
386 pci_set_master(struct pci_dev *pdev)
387 {
388 	pcireg_t csr;
389 
390 	csr = pci_conf_read(pdev->pd_pa.pa_pc, pdev->pd_pa.pa_tag,
391 	    PCI_COMMAND_STATUS_REG);
392 	csr |= PCI_COMMAND_MASTER_ENABLE;
393 	pci_conf_write(pdev->pd_pa.pa_pc, pdev->pd_pa.pa_tag,
394 	    PCI_COMMAND_STATUS_REG, csr);
395 }
396 
397 void
398 pci_clear_master(struct pci_dev *pdev)
399 {
400 	pcireg_t csr;
401 
402 	csr = pci_conf_read(pdev->pd_pa.pa_pc, pdev->pd_pa.pa_tag,
403 	    PCI_COMMAND_STATUS_REG);
404 	csr &= ~(pcireg_t)PCI_COMMAND_MASTER_ENABLE;
405 	pci_conf_write(pdev->pd_pa.pa_pc, pdev->pd_pa.pa_tag,
406 	    PCI_COMMAND_STATUS_REG, csr);
407 }
408 
409 int
410 pcie_capability_read_dword(struct pci_dev *pdev, int reg, uint32_t *valuep)
411 {
412 	pci_chipset_tag_t pc = pdev->pd_pa.pa_pc;
413 	pcitag_t tag = pdev->pd_pa.pa_tag;
414 	int off;
415 
416 	*valuep = 0;
417 
418 	/* Must have capabilities. */
419 	if (pci_get_capability(pc, tag, PCI_CAP_PCIEXPRESS, &off, NULL) == 0)
420 		return 1;
421 
422 	*valuep = pci_conf_read(pc, tag, off + reg);
423 
424 	return 0;
425 }
426 
427 int
428 pcie_capability_read_word(struct pci_dev *pdev, int reg, uint16_t *valuep)
429 {
430 	pci_chipset_tag_t pc = pdev->pd_pa.pa_pc;
431 	pcitag_t tag = pdev->pd_pa.pa_tag;
432 	int off;
433 
434 	*valuep = 0;
435 
436 	/* Must have capabilities. */
437 	if (pci_get_capability(pc, tag, PCI_CAP_PCIEXPRESS, &off, NULL) == 0)
438 		return 1;
439 
440 	*valuep = pci_conf_read(pc, tag, off + (reg &~ 2)) >> (8 * (reg & 2));
441 
442 	return 0;
443 }
444 
445 int
446 pcie_capability_write_dword(struct pci_dev *pdev, int reg, uint32_t value)
447 {
448 	pci_chipset_tag_t pc = pdev->pd_pa.pa_pc;
449 	pcitag_t tag = pdev->pd_pa.pa_tag;
450 	int off;
451 
452 	/* Must have capabilities. */
453 	if (pci_get_capability(pc, tag, PCI_CAP_PCIEXPRESS, &off, NULL) == 0)
454 		return 1;
455 
456 	pci_conf_write(pc, tag, off + reg, value);
457 
458 	return 0;
459 }
460 
461 int
462 pcie_capability_write_word(struct pci_dev *pdev, int reg, uint16_t value)
463 {
464 	pci_chipset_tag_t pc = pdev->pd_pa.pa_pc;
465 	pcitag_t tag = pdev->pd_pa.pa_tag;
466 	int off;
467 
468 	/* Must have capabilities. */
469 	if (pci_get_capability(pc, tag, PCI_CAP_PCIEXPRESS, &off, NULL) == 0)
470 		return 1;
471 
472 	pci_rmw_config(pc, tag, off + reg, 2, value);
473 
474 	return 0;
475 }
476 
/* From PCIe 5.0 7.5.3.4 "Device Control Register" */
/* Max_Read_Request_Size in bytes, indexed by the 3-bit field value. */
static const unsigned readrqmax[] = {
	128,
	256,
	512,
	1024,
	2048,
	4096,
};
486 
487 int
488 pcie_get_readrq(struct pci_dev *pdev)
489 {
490 	pci_chipset_tag_t pc = pdev->pd_pa.pa_pc;
491 	pcitag_t tag = pdev->pd_pa.pa_tag;
492 	unsigned val;
493 	int off;
494 
495 	if (pci_get_capability(pc, tag, PCI_CAP_PCIEXPRESS, &off, NULL) == 0)
496 		return -EINVAL; /* XXX NetBSD->Linux */
497 
498 	val = __SHIFTOUT(pci_conf_read(pc, tag, off + PCIE_DCSR),
499 	    PCIE_DCSR_MAX_READ_REQ);
500 
501 	if (val >= __arraycount(readrqmax))
502 		val = 0;
503 	return readrqmax[val];
504 }
505 
506 int
507 pcie_set_readrq(struct pci_dev *pdev, int val)
508 {
509 	pci_chipset_tag_t pc = pdev->pd_pa.pa_pc;
510 	pcitag_t tag = pdev->pd_pa.pa_tag;
511 	pcireg_t reg, newval = 0;
512 	unsigned i;
513 	int off;
514 
515 	for (i = 0; i < __arraycount(readrqmax); i++) {
516 		if (readrqmax[i] == val) {
517 			newval = i;
518 			break;
519 		}
520 	}
521 
522 	if (i == __arraycount(readrqmax))
523 		return -EINVAL;
524 
525 	if (pci_get_capability(pc, tag, PCI_CAP_PCIEXPRESS, &off, NULL) == 0)
526 		return -EINVAL; /* XXX NetBSD->Linux */
527 
528 	reg = pci_conf_read(pc, tag, off + PCIE_DCSR);
529 	reg &= ~PCIE_DCSR_MAX_READ_REQ | (newval << 12);
530 	pci_conf_write(pc, tag, off + PCIE_DCSR, reg);
531 
532 	return 0;
533 }
534 
/*
 * Alignment callback for pci_bus_alloc_resource.  Never invoked in
 * this file: pci_bus_alloc_resource marks its align_fn argument
 * __unused.  Panics if anything ever does call it.
 */
bus_addr_t
pcibios_align_resource(void *p, const struct resource *resource,
    bus_addr_t addr, bus_size_t size)
{
	panic("pcibios_align_resource has accessed unaligned neurons!");
}
541 
542 int
543 pci_bus_alloc_resource(struct pci_bus *bus, struct resource *resource,
544     bus_size_t size, bus_size_t align, bus_addr_t start, int type __unused,
545     bus_addr_t (*align_fn)(void *, const struct resource *, bus_addr_t,
546 	bus_size_t) __unused,
547     struct pci_dev *pdev)
548 {
549 	const struct pci_attach_args *const pa = &pdev->pd_pa;
550 	bus_space_tag_t bst;
551 	int error;
552 
553 	switch (resource->flags) {
554 	case IORESOURCE_MEM:
555 		bst = pa->pa_memt;
556 		break;
557 
558 	case IORESOURCE_IO:
559 		bst = pa->pa_iot;
560 		break;
561 
562 	default:
563 		panic("I don't know what kind of resource you want!");
564 	}
565 
566 	resource->r_bst = bst;
567 	error = bus_space_alloc(bst, start, __type_max(bus_addr_t),
568 	    size, align, 0, 0, &resource->start, &resource->r_bsh);
569 	if (error)
570 		return error;
571 
572 	resource->end = start + (size - 1);
573 	return 0;
574 }
575 
576 /*
577  * XXX Mega-kludgerific!  pci_get_bus_and_slot and pci_get_class are
578  * defined only for their single purposes in i915drm, in
579  * i915_get_bridge_dev and intel_detect_pch.  We can't define them more
580  * generally without adapting pci_find_device (and pci_enumerate_bus
581  * internally) to pass a cookie through.
582  */
583 
584 static int
585 pci_kludgey_match_bus0_dev0_func0(const struct pci_attach_args *pa)
586 {
587 
588 	/* XXX domain */
589 	if (pa->pa_bus != 0)
590 		return 0;
591 	if (pa->pa_device != 0)
592 		return 0;
593 	if (pa->pa_function != 0)
594 		return 0;
595 
596 	return 1;
597 }
598 
599 struct pci_dev *
600 pci_get_domain_bus_and_slot(int domain, int bus, int slot)
601 {
602 	struct pci_attach_args pa;
603 
604 	KASSERT(domain == 0);
605 	KASSERT(bus == 0);
606 	KASSERT(slot == PCI_DEVFN(0, 0));
607 
608 	if (!pci_find_device(&pa, &pci_kludgey_match_bus0_dev0_func0))
609 		return NULL;
610 
611 	struct pci_dev *const pdev = kmem_zalloc(sizeof(*pdev), KM_SLEEP);
612 	linux_pci_dev_init(pdev, NULL, NULL, &pa, NBPCI_KLUDGE_GET_MUMBLE);
613 
614 	return pdev;
615 }
616 
617 static int
618 pci_kludgey_match_isa_bridge(const struct pci_attach_args *pa)
619 {
620 
621 	if (PCI_CLASS(pa->pa_class) != PCI_CLASS_BRIDGE)
622 		return 0;
623 	if (PCI_SUBCLASS(pa->pa_class) != PCI_SUBCLASS_BRIDGE_ISA)
624 		return 0;
625 
626 	return 1;
627 }
628 
629 static int
630 pci_kludgey_match_other_display(const struct pci_attach_args *pa)
631 {
632 
633 	if (PCI_CLASS(pa->pa_class) != PCI_CLASS_DISPLAY)
634 		return 0;
635 	if (PCI_SUBCLASS(pa->pa_class) != PCI_SUBCLASS_DISPLAY_MISC)
636 		return 0;
637 
638 	return 1;
639 }
640 
641 static int
642 pci_kludgey_match_vga_display(const struct pci_attach_args *pa)
643 {
644 
645 	if (PCI_CLASS(pa->pa_class) != PCI_CLASS_DISPLAY)
646 		return 0;
647 	if (PCI_SUBCLASS(pa->pa_class) != PCI_SUBCLASS_DISPLAY_VGA)
648 		return 0;
649 
650 	return 1;
651 }
652 
653 static int
654 pci_kludgey_match_3d_display(const struct pci_attach_args *pa)
655 {
656 
657 	if (PCI_CLASS(pa->pa_class) != PCI_CLASS_DISPLAY)
658 		return 0;
659 	if (PCI_SUBCLASS(pa->pa_class) != PCI_SUBCLASS_DISPLAY_3D)
660 		return 0;
661 
662 	return 1;
663 }
664 
/*
 * Release a pci_dev obtained from pci_get_domain_bus_and_slot or
 * pci_get_class.  NULL is a no-op.
 */
void
pci_dev_put(struct pci_dev *pdev)
{

	if (pdev == NULL)
		return;

	/* Only the pci_get_* kludges hand out devices to put back. */
	KASSERT(ISSET(pdev->pd_kludges, NBPCI_KLUDGE_GET_MUMBLE));
	kmem_free(pdev->bus, sizeof(*pdev->bus));
	kmem_free(pdev, sizeof(*pdev));
}
676 
677 struct pci_dev *		/* XXX i915/amdgpu kludge */
678 pci_get_class(uint32_t class_subclass_shifted, struct pci_dev *from)
679 {
680 	struct pci_attach_args pa;
681 
682 	if (from != NULL) {
683 		pci_dev_put(from);
684 		return NULL;
685 	}
686 
687 	switch (class_subclass_shifted) {
688 	case PCI_CLASS_BRIDGE_ISA << 8:
689 		if (!pci_find_device(&pa, &pci_kludgey_match_isa_bridge))
690 			return NULL;
691 		break;
692 	case PCI_CLASS_DISPLAY_OTHER << 8:
693 		if (!pci_find_device(&pa, &pci_kludgey_match_other_display))
694 			return NULL;
695 		break;
696 	case PCI_CLASS_DISPLAY_VGA << 8:
697 		if (!pci_find_device(&pa, &pci_kludgey_match_vga_display))
698 			return NULL;
699 		break;
700 	case PCI_CLASS_DISPLAY_3D << 8:
701 		if (!pci_find_device(&pa, &pci_kludgey_match_3d_display))
702 			return NULL;
703 		break;
704 	default:
705 		panic("unknown pci_get_class: %"PRIx32,
706 		    class_subclass_shifted);
707 	}
708 
709 	struct pci_dev *const pdev = kmem_zalloc(sizeof(*pdev), KM_SLEEP);
710 	linux_pci_dev_init(pdev, NULL, NULL, &pa, NBPCI_KLUDGE_GET_MUMBLE);
711 
712 	return pdev;
713 }
714 
/*
 * Report whether any device matching `ids' exists.  Not implemented;
 * always answers "no".
 */
int
pci_dev_present(const struct pci_device_id *ids)
{

	/* XXX implement me -- pci_find_device doesn't pass a cookie */
	return 0;
}
722 
/*
 * Undo pci_map_rom/pci_map_rom_md: unmap the ROM bus space and clear
 * the kludge flag.  `vaddr' must match the pointer last returned by
 * pci_map_rom (NULL when tearing down a partial mapping).
 */
void
pci_unmap_rom(struct pci_dev *pdev, void __pci_rom_iomem *vaddr __unused)
{

	/* XXX Disable the ROM address decoder.  */
	KASSERT(ISSET(pdev->pd_kludges, NBPCI_KLUDGE_MAP_ROM));
	KASSERT(vaddr == pdev->pd_rom_vaddr);
	bus_space_unmap(pdev->pd_rom_bst, pdev->pd_rom_bsh, pdev->pd_rom_size);
	pdev->pd_kludges &= ~NBPCI_KLUDGE_MAP_ROM;
	pdev->pd_rom_vaddr = NULL;
}
734 
/* XXX Whattakludge!  Should move this in sys/arch/.  */
/*
 * Fallback ROM mapping for x86-family machines: map the legacy VGA
 * BIOS shadow at 0xc0000-0xdffff instead of the device's expansion ROM
 * BAR.  Only applies to VGA display devices.  Returns 0 on success,
 * ENXIO when inapplicable or the mapping fails.
 */
static int
pci_map_rom_md(struct pci_dev *pdev)
{
#if defined(__i386__) || defined(__x86_64__) || defined(__ia64__)
	const bus_addr_t rom_base = 0xc0000;
	const bus_size_t rom_size = 0x20000;
	bus_space_handle_t rom_bsh;
	int error;

	if (PCI_CLASS(pdev->pd_pa.pa_class) != PCI_CLASS_DISPLAY)
		return ENXIO;
	if (PCI_SUBCLASS(pdev->pd_pa.pa_class) != PCI_SUBCLASS_DISPLAY_VGA)
		return ENXIO;
	/* XXX Check whether this is the primary VGA card?  */
	error = bus_space_map(pdev->pd_pa.pa_memt, rom_base, rom_size,
	    (BUS_SPACE_MAP_LINEAR | BUS_SPACE_MAP_PREFETCHABLE), &rom_bsh);
	if (error)
		return ENXIO;

	/* Record the mapping so pci_unmap_rom can undo it. */
	pdev->pd_rom_bst = pdev->pd_pa.pa_memt;
	pdev->pd_rom_bsh = rom_bsh;
	pdev->pd_rom_size = rom_size;
	pdev->pd_kludges |= NBPCI_KLUDGE_MAP_ROM;

	return 0;
#else
	return ENXIO;
#endif
}
765 
/*
 * Map the device's expansion ROM and locate an x86 code image in it.
 * On success store the image size in *sizep and return its mapped
 * address (cached in pd_rom_vaddr); on failure return NULL.
 *
 * Two attempts: the ROM BAR first (any failure jumps to fail_mi), then
 * the legacy shadow area via pci_map_rom_md (failure jumps to
 * fail_md).
 */
void __pci_rom_iomem *
pci_map_rom(struct pci_dev *pdev, size_t *sizep)
{

	KASSERT(!ISSET(pdev->pd_kludges, NBPCI_KLUDGE_MAP_ROM));

	if (pci_mapreg_map(&pdev->pd_pa, PCI_MAPREG_ROM, PCI_MAPREG_TYPE_ROM,
		(BUS_SPACE_MAP_PREFETCHABLE | BUS_SPACE_MAP_LINEAR),
		&pdev->pd_rom_bst, &pdev->pd_rom_bsh, NULL, &pdev->pd_rom_size)
	    != 0)
		goto fail_mi;
	pdev->pd_kludges |= NBPCI_KLUDGE_MAP_ROM;

	/* XXX This type is obviously wrong in general...  */
	if (pci_find_rom(&pdev->pd_pa, pdev->pd_rom_bst, pdev->pd_rom_bsh,
		pdev->pd_rom_size, PCI_ROM_CODE_TYPE_X86,
		&pdev->pd_rom_found_bsh, &pdev->pd_rom_found_size)) {
		pci_unmap_rom(pdev, NULL);
		goto fail_mi;
	}
	goto success;

fail_mi:
	/* BAR route failed; fall back to the legacy shadow mapping. */
	if (pci_map_rom_md(pdev) != 0)
		goto fail_md;

	/* XXX This type is obviously wrong in general...  */
	if (pci_find_rom(&pdev->pd_pa, pdev->pd_rom_bst, pdev->pd_rom_bsh,
		pdev->pd_rom_size, PCI_ROM_CODE_TYPE_X86,
		&pdev->pd_rom_found_bsh, &pdev->pd_rom_found_size)) {
		pci_unmap_rom(pdev, NULL);
		goto fail_md;
	}

success:
	KASSERT(pdev->pd_rom_found_size <= SIZE_T_MAX);
	*sizep = pdev->pd_rom_found_size;
	pdev->pd_rom_vaddr = bus_space_vaddr(pdev->pd_rom_bst,
	    pdev->pd_rom_found_bsh);
	return pdev->pd_rom_vaddr;

fail_md:
	return NULL;
}
810 
811 void __pci_rom_iomem *
812 pci_platform_rom(struct pci_dev *pdev __unused, size_t *sizep)
813 {
814 
815 	*sizep = 0;
816 	return NULL;
817 }
818 
819 int
820 pci_enable_rom(struct pci_dev *pdev)
821 {
822 	const pci_chipset_tag_t pc = pdev->pd_pa.pa_pc;
823 	const pcitag_t tag = pdev->pd_pa.pa_tag;
824 	pcireg_t addr;
825 	int s;
826 
827 	/* XXX Don't do anything if the ROM isn't there.  */
828 
829 	s = splhigh();
830 	addr = pci_conf_read(pc, tag, PCI_MAPREG_ROM);
831 	addr |= PCI_MAPREG_ROM_ENABLE;
832 	pci_conf_write(pc, tag, PCI_MAPREG_ROM, addr);
833 	splx(s);
834 
835 	return 0;
836 }
837 
838 void
839 pci_disable_rom(struct pci_dev *pdev)
840 {
841 	const pci_chipset_tag_t pc = pdev->pd_pa.pa_pc;
842 	const pcitag_t tag = pdev->pd_pa.pa_tag;
843 	pcireg_t addr;
844 	int s;
845 
846 	s = splhigh();
847 	addr = pci_conf_read(pc, tag, PCI_MAPREG_ROM);
848 	addr &= ~(pcireg_t)PCI_MAPREG_ROM_ENABLE;
849 	pci_conf_write(pc, tag, PCI_MAPREG_ROM, addr);
850 	splx(s);
851 }
852 
/*
 * Linux pci_resource_start: base bus address of BAR `i'.  Panics on an
 * out-of-range index.
 */
bus_addr_t
pci_resource_start(struct pci_dev *pdev, unsigned i)
{

	if (i >= PCI_NUM_RESOURCES)
		panic("resource %d >= max %d", i, PCI_NUM_RESOURCES);
	return pdev->pd_resources[i].addr;
}
861 
/*
 * Linux pci_resource_len: size in bytes of BAR `i' (0 if the BAR is
 * unimplemented).  Panics on an out-of-range index.
 */
bus_size_t
pci_resource_len(struct pci_dev *pdev, unsigned i)
{

	if (i >= PCI_NUM_RESOURCES)
		panic("resource %d >= max %d", i, PCI_NUM_RESOURCES);
	return pdev->pd_resources[i].size;
}
870 
871 bus_addr_t
872 pci_resource_end(struct pci_dev *pdev, unsigned i)
873 {
874 
875 	return pci_resource_start(pdev, i) + (pci_resource_len(pdev, i) - 1);
876 }
877 
/*
 * Linux pci_resource_flags: the bus_space flags recorded for BAR `i'
 * at linux_pci_dev_init time.  Panics on an out-of-range index.
 */
int
pci_resource_flags(struct pci_dev *pdev, unsigned i)
{

	if (i >= PCI_NUM_RESOURCES)
		panic("resource %d >= max %d", i, PCI_NUM_RESOURCES);
	return pdev->pd_resources[i].flags;
}
886 
/*
 * Map the first `size' bytes of BAR `i' (memory-type BARs only) and
 * return the kernel virtual address, or NULL if the BAR is I/O-type,
 * smaller than `size', or the mapping fails.  The mapping is recorded
 * in pd_resources[i] for pci_iounmap/linux_pci_dev_destroy.
 */
void __pci_iomem *
pci_iomap(struct pci_dev *pdev, unsigned i, bus_size_t size)
{
	int error;

	KASSERT(i < PCI_NUM_RESOURCES);
	KASSERT(pdev->pd_resources[i].kva == NULL);

	if (PCI_MAPREG_TYPE(pdev->pd_resources[i].type) != PCI_MAPREG_TYPE_MEM)
		return NULL;
	if (pdev->pd_resources[i].size < size)
		return NULL;
	/*
	 * NOTE(review): always maps through pa_memt -- presumably
	 * correct for all memory BARs on supported platforms; confirm.
	 */
	error = bus_space_map(pdev->pd_pa.pa_memt, pdev->pd_resources[i].addr,
	    size, BUS_SPACE_MAP_LINEAR | pdev->pd_resources[i].flags,
	    &pdev->pd_resources[i].bsh);
	if (error)
		return NULL;
	pdev->pd_resources[i].bst = pdev->pd_pa.pa_memt;
	pdev->pd_resources[i].kva = bus_space_vaddr(pdev->pd_resources[i].bst,
	    pdev->pd_resources[i].bsh);
	pdev->pd_resources[i].mapped = true;

	return pdev->pd_resources[i].kva;
}
911 
912 void
913 pci_iounmap(struct pci_dev *pdev, void __pci_iomem *kva)
914 {
915 	unsigned i;
916 
917 	CTASSERT(__arraycount(pdev->pd_resources) == PCI_NUM_RESOURCES);
918 	for (i = 0; i < PCI_NUM_RESOURCES; i++) {
919 		if (pdev->pd_resources[i].kva == kva)
920 			break;
921 	}
922 	KASSERT(i < PCI_NUM_RESOURCES);
923 
924 	pdev->pd_resources[i].kva = NULL;
925 	bus_space_unmap(pdev->pd_resources[i].bst, pdev->pd_resources[i].bsh,
926 	    pdev->pd_resources[i].size);
927 }
928 
929 void
930 pci_save_state(struct pci_dev *pdev)
931 {
932 
933 	KASSERT(pdev->pd_saved_state == NULL);
934 	pdev->pd_saved_state = kmem_alloc(sizeof(*pdev->pd_saved_state),
935 	    KM_SLEEP);
936 	pci_conf_capture(pdev->pd_pa.pa_pc, pdev->pd_pa.pa_tag,
937 	    pdev->pd_saved_state);
938 }
939 
940 void
941 pci_restore_state(struct pci_dev *pdev)
942 {
943 
944 	KASSERT(pdev->pd_saved_state != NULL);
945 	pci_conf_restore(pdev->pd_pa.pa_pc, pdev->pd_pa.pa_tag,
946 	    pdev->pd_saved_state);
947 	kmem_free(pdev->pd_saved_state, sizeof(*pdev->pd_saved_state));
948 	pdev->pd_saved_state = NULL;
949 }
950 
951 bool
952 pci_is_pcie(struct pci_dev *pdev)
953 {
954 
955 	return (pci_find_capability(pdev, PCI_CAP_PCIEXPRESS) != 0);
956 }
957 
958 bool
959 pci_dma_supported(struct pci_dev *pdev, uintmax_t mask)
960 {
961 
962 	/* XXX Cop-out.  */
963 	if (mask > DMA_BIT_MASK(32))
964 		return pci_dma64_available(&pdev->pd_pa);
965 	else
966 		return true;
967 }
968 
/*
 * Report whether the device is attached via Thunderbolt.  Not
 * implemented; always answers "no".
 */
bool
pci_is_thunderbolt_attached(struct pci_dev *pdev)
{

	/* XXX Cop-out.  */
	return false;
}
976 
977 bool
978 pci_is_root_bus(struct pci_bus *bus)
979 {
980 
981 	return bus->number == 0;
982 }
983 
984 int
985 pci_domain_nr(struct pci_bus *bus)
986 {
987 
988 	return pci_get_segment(bus->pb_pc);
989 }
990 
/*
 * We explicitly rename pci_enable/disable_device so that you have to
 * review each use of them, since NetBSD's PCI API does _not_ respect
 * our local enablecnt here, but there are different parts of NetBSD
 * that automatically enable/disable like PMF, so you have to decide
 * for each one whether to call it or not.
 */

/*
 * Enable I/O and memory decoding for the device, counting nested
 * enables in pd_enablecnt.  Always returns 0.
 */
int
linux_pci_enable_device(struct pci_dev *pdev)
{
	const struct pci_attach_args *pa = &pdev->pd_pa;
	pcireg_t csr;
	int s;

	/* Already enabled by us: just bump the count. */
	if (pdev->pd_enablecnt++)
		return 0;

	s = splhigh();
	csr = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_COMMAND_STATUS_REG);
	/* If someone else (firmware) already enabled it, credit them.  */
	if (csr & (PCI_COMMAND_IO_ENABLE|PCI_COMMAND_MEM_ENABLE))
		pdev->pd_enablecnt++;
	csr |= PCI_COMMAND_IO_ENABLE;
	csr |= PCI_COMMAND_MEM_ENABLE;
	pci_conf_write(pa->pa_pc, pa->pa_tag, PCI_COMMAND_STATUS_REG, csr);
	splx(s);

	return 0;
}
1021 
/*
 * Counterpart of linux_pci_enable_device: drop one reference and turn
 * off I/O and memory decoding when the count reaches zero.
 */
void
linux_pci_disable_device(struct pci_dev *pdev)
{
	const struct pci_attach_args *pa = &pdev->pd_pa;
	pcireg_t csr;
	int s;

	/* Still referenced elsewhere: keep the device enabled. */
	if (--pdev->pd_enablecnt)
		return;

	s = splhigh();
	csr = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_COMMAND_STATUS_REG);
	csr &= ~PCI_COMMAND_IO_ENABLE;
	csr &= ~PCI_COMMAND_MEM_ENABLE;
	pci_conf_write(pa->pa_pc, pa->pa_tag, PCI_COMMAND_STATUS_REG, csr);
	splx(s);
}
1039 
1040 void
1041 linux_pci_dev_destroy(struct pci_dev *pdev)
1042 {
1043 	unsigned i;
1044 
1045 	if (pdev->bus->self != NULL) {
1046 		kmem_free(pdev->bus->self, sizeof(*pdev->bus->self));
1047 	}
1048 	if (pdev->bus != NULL) {
1049 		kmem_free(pdev->bus, sizeof(*pdev->bus));
1050 		pdev->bus = NULL;
1051 	}
1052 	if (ISSET(pdev->pd_kludges, NBPCI_KLUDGE_MAP_ROM)) {
1053 		pci_unmap_rom(pdev, pdev->pd_rom_vaddr);
1054 		pdev->pd_rom_vaddr = 0;
1055 	}
1056 	for (i = 0; i < __arraycount(pdev->pd_resources); i++) {
1057 		if (!pdev->pd_resources[i].mapped)
1058 			continue;
1059 		bus_space_unmap(pdev->pd_resources[i].bst,
1060 		    pdev->pd_resources[i].bsh, pdev->pd_resources[i].size);
1061 	}
1062 
1063 	/* There is no way these should be still in use.  */
1064 	KASSERT(pdev->pd_saved_state == NULL);
1065 	KASSERT(pdev->pd_intr_handles == NULL);
1066 }
1067 
/*
 * Return the highest link speed the device supports, from LCAP2 when
 * the capability structure is version 2 or later (and LCAP2 is
 * non-zero), otherwise from the Max Link Speed field of LCAP.
 * PCI_SPEED_UNKNOWN if the device has no PCIe capability or reports no
 * recognized speed.
 */
enum pci_bus_speed
pcie_get_speed_cap(struct pci_dev *dev)
{
	pci_chipset_tag_t pc = dev->pd_pa.pa_pc;
	pcitag_t tag = dev->pd_pa.pa_tag;
	pcireg_t lcap, lcap2, xcap;
	int off;

	/* Must have capabilities. */
	if (pci_get_capability(pc, tag, PCI_CAP_PCIEXPRESS, &off, NULL) == 0)
		return PCI_SPEED_UNKNOWN;

	/* Only PCIe 3.x has LCAP2. */
	xcap = pci_conf_read(pc, tag, off + PCIE_XCAP);
	if (__SHIFTOUT(xcap, PCIE_XCAP_VER_MASK) >= 2) {
		lcap2 = pci_conf_read(pc, tag, off + PCIE_LCAP2);
		if (lcap2) {
			/* Highest supported speed wins. */
			if ((lcap2 & PCIE_LCAP2_SUP_LNKS64) != 0) {
				return PCIE_SPEED_64_0GT;
			}
			if ((lcap2 & PCIE_LCAP2_SUP_LNKS32) != 0) {
				return PCIE_SPEED_32_0GT;
			}
			if ((lcap2 & PCIE_LCAP2_SUP_LNKS16) != 0) {
				return PCIE_SPEED_16_0GT;
			}
			if ((lcap2 & PCIE_LCAP2_SUP_LNKS8) != 0) {
				return PCIE_SPEED_8_0GT;
			}
			if ((lcap2 & PCIE_LCAP2_SUP_LNKS5) != 0) {
				return PCIE_SPEED_5_0GT;
			}
			if ((lcap2 & PCIE_LCAP2_SUP_LNKS2) != 0) {
				return PCIE_SPEED_2_5GT;
			}
		}
	}

	/* Fall back to the LCAP Max Link Speed encoding. */
	lcap = pci_conf_read(pc, tag, off + PCIE_LCAP);
	if ((lcap & PCIE_LCAP_MAX_SPEED) == PCIE_LCAP_MAX_SPEED_64) {
		return PCIE_SPEED_64_0GT;
	}
	if ((lcap & PCIE_LCAP_MAX_SPEED) == PCIE_LCAP_MAX_SPEED_32) {
		return PCIE_SPEED_32_0GT;
	}
	if ((lcap & PCIE_LCAP_MAX_SPEED) == PCIE_LCAP_MAX_SPEED_16) {
		return PCIE_SPEED_16_0GT;
	}
	if ((lcap & PCIE_LCAP_MAX_SPEED) == PCIE_LCAP_MAX_SPEED_8) {
		return PCIE_SPEED_8_0GT;
	}
	if ((lcap & PCIE_LCAP_MAX_SPEED) == PCIE_LCAP_MAX_SPEED_5) {
		return PCIE_SPEED_5_0GT;
	}
	if ((lcap & PCIE_LCAP_MAX_SPEED) == PCIE_LCAP_MAX_SPEED_2) {
		return PCIE_SPEED_2_5GT;
	}

	return PCI_SPEED_UNKNOWN;
}
1128 
1129 /*
1130  * This should walk the tree, it only checks this device currently.
1131  * It also does not write to limiting_dev (the only caller in drm2
1132  * currently does not use it.)
1133  */
1134 unsigned
1135 pcie_bandwidth_available(struct pci_dev *dev,
1136     struct pci_dev **limiting_dev,
1137     enum pci_bus_speed *speed,
1138     enum pcie_link_width *width)
1139 {
1140 	pci_chipset_tag_t pc = dev->pd_pa.pa_pc;
1141 	pcitag_t tag = dev->pd_pa.pa_tag;
1142 	pcireg_t lcsr;
1143 	unsigned per_line_speed, num_lanes;
1144 	int off;
1145 
1146 	/* Must have capabilities. */
1147 	if (pci_get_capability(pc, tag, PCI_CAP_PCIEXPRESS, &off, NULL) == 0)
1148 		return 0;
1149 
1150 	if (speed)
1151 		*speed = PCI_SPEED_UNKNOWN;
1152 	if (width)
1153 		*width = 0;
1154 
1155 	lcsr = pci_conf_read(pc, tag, off + PCIE_LCSR);
1156 
1157 	switch (lcsr & PCIE_LCSR_NLW) {
1158 	case PCIE_LCSR_NLW_X1:
1159 	case PCIE_LCSR_NLW_X2:
1160 	case PCIE_LCSR_NLW_X4:
1161 	case PCIE_LCSR_NLW_X8:
1162 	case PCIE_LCSR_NLW_X12:
1163 	case PCIE_LCSR_NLW_X16:
1164 	case PCIE_LCSR_NLW_X32:
1165 		num_lanes = __SHIFTOUT(lcsr, PCIE_LCSR_NLW);
1166 		if (width)
1167 			*width = num_lanes;
1168 		break;
1169 	default:
1170 		num_lanes = 0;
1171 		break;
1172 	}
1173 
1174 	switch (__SHIFTOUT(lcsr, PCIE_LCSR_LINKSPEED)) {
1175 	case PCIE_LCSR_LINKSPEED_2:
1176 		*speed = PCIE_SPEED_2_5GT;
1177 		per_line_speed = 2500 * 8 / 10;
1178 		break;
1179 	case PCIE_LCSR_LINKSPEED_5:
1180 		*speed = PCIE_SPEED_5_0GT;
1181 		per_line_speed = 5000 * 8 / 10;
1182 		break;
1183 	case PCIE_LCSR_LINKSPEED_8:
1184 		*speed = PCIE_SPEED_8_0GT;
1185 		per_line_speed = 8000 * 128 / 130;
1186 		break;
1187 	case PCIE_LCSR_LINKSPEED_16:
1188 		*speed = PCIE_SPEED_16_0GT;
1189 		per_line_speed = 16000 * 128 / 130;
1190 		break;
1191 	case PCIE_LCSR_LINKSPEED_32:
1192 		*speed = PCIE_SPEED_32_0GT;
1193 		per_line_speed = 32000 * 128 / 130;
1194 		break;
1195 	case PCIE_LCSR_LINKSPEED_64:
1196 		*speed = PCIE_SPEED_64_0GT;
1197 		per_line_speed = 64000 * 128 / 130;
1198 		break;
1199 	default:
1200 		per_line_speed = 0;
1201 	}
1202 
1203 	return num_lanes * per_line_speed;
1204 }
1205