xref: /openbsd-src/usr.sbin/vmd/pci.c (revision 0b7734b3d77bb9b21afec6f4621cae6c805dbd45)
1 /*	$OpenBSD: pci.c,v 1.6 2016/01/14 02:46:40 mlarkin Exp $	*/
2 
3 /*
4  * Copyright (c) 2015 Mike Larkin <mlarkin@openbsd.org>
5  *
6  * Permission to use, copy, modify, and distribute this software for any
7  * purpose with or without fee is hereby granted, provided that the above
8  * copyright notice and this permission notice appear in all copies.
9  *
10  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
11  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
12  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
13  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
14  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
15  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
16  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
17  */
18 
19 #include <string.h>
20 #include <sys/types.h>
21 #include <dev/pci/pcireg.h>
22 #include <dev/pci/pcidevs.h>
23 #include <dev/pci/virtioreg.h>
24 #include <machine/vmmvar.h>
25 #include "vmd.h"
26 #include "pci.h"
27 
28 struct pci pci;
29 
30 extern char *__progname;
31 
32 /* PIC IRQs, assigned to devices in order */
33 const uint8_t pci_pic_irqs[PCI_MAX_PIC_IRQS] = {3, 5, 9, 10, 11};
34 
35 /*
36  * pci_add_bar
37  *
38  * Adds a BAR for the PCI device 'id'. On access, 'barfn' will be
39  * called, and passed 'cookie' as an identifier.
40  *
41  * BARs are fixed size, meaning all I/O BARs requested have the
42  * same size and all MMIO BARs have the same size.
43  *
44  * Parameters:
45  *  id: PCI device to add the BAR to (local count, eg if id == 4,
46  *      this BAR is to be added to the VM's 5th PCI device)
47  *  type: type of the BAR to add (PCI_MAPREG_TYPE_xxx)
48  *  barfn: callback function invoked on BAR access
49  *  cookie: cookie passed to barfn on access
50  *
51  * Returns 0 if the BAR was added successfully, 1 otherwise.
52  */
53 int
54 pci_add_bar(uint8_t id, uint32_t type, void *barfn, void *cookie)
55 {
56 	uint8_t bar_reg_idx, bar_ct;
57 
58 	/* Check id */
59 	if (id >= pci.pci_dev_ct)
60 		return (1);
61 
62 	/* Can only add PCI_MAX_BARS BARs to any device */
63 	bar_ct = pci.pci_devices[id].pd_bar_ct;
64 	if (bar_ct >= PCI_MAX_BARS)
65 		return (1);
66 
67 	/* Compute BAR address and add */
68 	bar_reg_idx = (PCI_MAPREG_START + (bar_ct * 4)) / 4;
69 	if (type == PCI_MAPREG_TYPE_MEM) {
70 		if (pci.pci_next_mmio_bar >= VMM_PCI_MMIO_BAR_END)
71 			return (1);
72 
73 		pci.pci_devices[id].pd_cfg_space[bar_reg_idx] =
74 		    PCI_MAPREG_MEM_ADDR(pci.pci_next_mmio_bar);
75 		pci.pci_next_mmio_bar += VMM_PCI_MMIO_BAR_SIZE;
76 		pci.pci_devices[id].pd_barfunc[bar_ct] = barfn;
77 		pci.pci_devices[id].pd_bar_cookie[bar_ct] = cookie;
78 		pci.pci_devices[id].pd_bartype[bar_ct] = PCI_BAR_TYPE_MMIO;
79 		pci.pci_devices[id].pd_barsize[bar_ct] = VMM_PCI_MMIO_BAR_SIZE;
80 		pci.pci_devices[id].pd_bar_ct++;
81 	} else if (type == PCI_MAPREG_TYPE_IO) {
82 		if (pci.pci_next_io_bar >= VMM_PCI_IO_BAR_END)
83 			return (1);
84 
85 		pci.pci_devices[id].pd_cfg_space[bar_reg_idx] =
86 		     PCI_MAPREG_IO_ADDR(pci.pci_next_io_bar) |
87 		     PCI_MAPREG_TYPE_IO;
88 		pci.pci_next_io_bar += VMM_PCI_IO_BAR_SIZE;
89 		pci.pci_devices[id].pd_barfunc[bar_ct] = barfn;
90 		pci.pci_devices[id].pd_bar_cookie[bar_ct] = cookie;
91 		dprintf("%s: adding pci bar cookie for dev %d bar %d = %p",
92 		    __progname, id, bar_ct, cookie);
93 		pci.pci_devices[id].pd_bartype[bar_ct] = PCI_BAR_TYPE_IO;
94 		pci.pci_devices[id].pd_barsize[bar_ct] = VMM_PCI_IO_BAR_SIZE;
95 		pci.pci_devices[id].pd_bar_ct++;
96 	}
97 
98 	return (0);
99 }
100 
101 /*
102  * pci_add_device
103  *
104  * Adds a PCI device to the guest VM defined by the supplied parameters.
105  *
106  * Parameters:
107  *  id: the new PCI device ID (0 .. PCI_CONFIG_MAX_DEV)
108  *  vid: PCI VID of the new device
109  *  pid: PCI PID of the new device
110  *  class: PCI 'class' of the new device
111  *  subclass: PCI 'subclass' of the new device
112  *  subsys_vid: subsystem VID of the new device
113  *  subsys_id: subsystem ID of the new device
114  *  irq_needed: 1 if an IRQ should be assigned to this PCI device, 0 otherwise
115  *  csfunc: PCI config space callback function when the guest VM accesses
116  *      CS of this PCI device
117  *
118  * Return values:
119  *  0: the PCI device was added successfully. The PCI device ID is in 'id'.
120  *  1: the PCI device addition failed.
121  */
122 int
123 pci_add_device(uint8_t *id, uint16_t vid, uint16_t pid, uint8_t class,
124     uint8_t subclass, uint16_t subsys_vid, uint16_t subsys_id,
125     uint8_t irq_needed, pci_cs_fn_t csfunc)
126 {
127 	/* Exceeded max devices? */
128 	if (pci.pci_dev_ct >= PCI_CONFIG_MAX_DEV)
129 		return (1);
130 
131 	/* Exceeded max IRQs? */
132 	/* XXX we could share IRQs ... */
133 	if (pci.pci_next_pic_irq >= PCI_MAX_PIC_IRQS && irq_needed)
134 		return (1);
135 
136 	*id = pci.pci_dev_ct;
137 
138 	pci.pci_devices[*id].pd_vid = vid;
139 	pci.pci_devices[*id].pd_did = pid;
140 	pci.pci_devices[*id].pd_class = class;
141 	pci.pci_devices[*id].pd_subclass = subclass;
142 	pci.pci_devices[*id].pd_subsys_vid = subsys_vid;
143 	pci.pci_devices[*id].pd_subsys_id = subsys_id;
144 
145 	pci.pci_devices[*id].pd_csfunc = csfunc;
146 
147 	if (irq_needed) {
148 		pci.pci_devices[*id].pd_irq =
149 		    pci_pic_irqs[pci.pci_next_pic_irq];
150 		pci.pci_devices[*id].pd_int = 1;
151 		pci.pci_next_pic_irq++;
152 		dprintf("assigned irq %d to pci dev %d",
153 		    pci.pci_devices[*id].pd_irq, *id);
154 	}
155 
156 	pci.pci_dev_ct ++;
157 
158 	return (0);
159 }
160 
161 /*
162  * pci_init
163  *
164  * Initializes the PCI subsystem for the VM by adding a PCI host bridge
165  * as the first PCI device.
166  */
167 void
168 pci_init(void)
169 {
170 	uint8_t id;
171 
172 	memset(&pci, 0, sizeof(pci));
173 	pci.pci_next_mmio_bar = VMM_PCI_MMIO_BAR_BASE;
174 	pci.pci_next_io_bar = VMM_PCI_IO_BAR_BASE;
175 
176 	if (pci_add_device(&id, PCI_VENDOR_OPENBSD, PCI_PRODUCT_OPENBSD_PCHB,
177 	    PCI_CLASS_BRIDGE, PCI_SUBCLASS_BRIDGE_HOST,
178 	    PCI_VENDOR_OPENBSD, 0, 0, NULL)) {
179 		log_warnx("%s: can't add PCI host bridge", __progname);
180 		return;
181 	}
182 }
183 
184 void
185 pci_handle_address_reg(struct vm_run_params *vrp)
186 {
187 	union vm_exit *vei = vrp->vrp_exit;
188 
189 	/*
190 	 * vei_dir == 0 : out instruction
191 	 *
192 	 * The guest wrote to the address register.
193 	 */
194 	if (vei->vei.vei_dir == 0) {
195 		pci.pci_addr_reg = vei->vei.vei_data;
196 	} else {
197 		/*
198 		 * vei_dir == 1 : in instruction
199 		 *
200 		 * The guest read the address register/
201 		 */
202 		vei->vei.vei_data = pci.pci_addr_reg;
203 	}
204 }
205 
206 uint8_t
207 pci_handle_io(struct vm_run_params *vrp)
208 {
209 	int i, j, k, l;
210 	uint16_t reg, b_hi, b_lo;
211 	pci_iobar_fn_t fn;
212 	union vm_exit *vei = vrp->vrp_exit;
213 	uint8_t intr;
214 
215 	k = -1;
216 	l = -1;
217 	reg = vei->vei.vei_port;
218 	intr = 0xFF;
219 
220 	for (i = 0 ; i < pci.pci_dev_ct ; i++) {
221 		for (j = 0 ; j < pci.pci_devices[i].pd_bar_ct; j++) {
222 			b_lo = PCI_MAPREG_IO_ADDR(pci.pci_devices[i].pd_bar[j]);
223 			b_hi = b_lo + VMM_PCI_IO_BAR_SIZE;
224 			if (reg >= b_lo && reg < b_hi) {
225 				if (pci.pci_devices[i].pd_barfunc[j]) {
226 					k = j;
227 					l = i;
228 				}
229 			}
230 		}
231 	}
232 
233 	if (k >= 0 && l >= 0) {
234 		fn = (pci_iobar_fn_t)pci.pci_devices[l].pd_barfunc[k];
235 		if (fn(vei->vei.vei_dir, reg -
236 		    PCI_MAPREG_IO_ADDR(pci.pci_devices[l].pd_bar[k]),
237 		    &vei->vei.vei_data, &intr,
238 		    pci.pci_devices[l].pd_bar_cookie[k])) {
239 			log_warnx("%s: pci i/o access function failed",
240 			    __progname);
241 		}
242 	} else {
243 		log_warnx("%s: no pci i/o function for reg 0x%llx",
244 		    __progname, (uint64_t)reg);
245 	}
246 
247 	if (intr != 0xFF) {
248 		intr = pci.pci_devices[l].pd_irq;
249 	}
250 
251 	return (intr);
252 }
253 
/*
 * pci_handle_data_reg
 *
 * Handles guest accesses to the PCI mode-1 data register (0xCFC),
 * decoding the latched address register into bus/device/function/offset
 * and either dispatching to the device's config space callback or
 * falling back to a simple read/write of the stored cfg space words.
 */
void
pci_handle_data_reg(struct vm_run_params *vrp)
{
	union vm_exit *vei = vrp->vrp_exit;
	uint8_t b, d, f, o;
	int ret;
	pci_cs_fn_t csfunc;

	/* abort if the address register is wack */
	if (!(pci.pci_addr_reg & PCI_MODE1_ENABLE)) {
		/* if read, return FFs */
		if (vei->vei.vei_dir == 1)
			vei->vei.vei_data = 0xffffffff;
		/* NOTE(review): message says "read" but this path is also
		 * taken for writes */
		log_warnx("invalid address register during pci read: "
		    "0x%llx", (uint64_t)pci.pci_addr_reg);
		return;
	}

	/* Mode-1 decode: bus[23:16], device[15:11], function[10:8],
	 * dword-aligned offset[7:2]. 'b' and 'f' are decoded but unused
	 * below; all devices live on bus 0, function 0. */
	b = (pci.pci_addr_reg >> 16) & 0xff;
	d = (pci.pci_addr_reg >> 11) & 0x1f;
	f = (pci.pci_addr_reg >> 8) & 0x7;
	o = (pci.pci_addr_reg & 0xfc);

	/* NOTE(review): 'd' is not checked against pci_dev_ct here —
	 * probes of unpopulated slots read the zeroed pci_devices entry
	 * rather than returning 0xffffffff; confirm this is intended. */
	csfunc = pci.pci_devices[d].pd_csfunc;
	if (csfunc != NULL) {
		/* Device supplied its own config space handler; offset is
		 * passed as a dword register index. */
		ret = csfunc(vei->vei.vei_dir, (o / 4), &vei->vei.vei_data);
		if (ret)
			log_warnx("cfg space access function failed for "
			    "pci device %d", d);
		return;
	}

	/* No config space function, fallback to default simple r/w impl. */

	/*
	 * vei_dir == 0 : out instruction
	 *
	 * The guest wrote to the config space location denoted by the current
	 * value in the address register.
	 */
	if (vei->vei.vei_dir == 0) {
		/*
		 * BAR-sizing probe: a write of all-ones to a BAR register
		 * (offsets 0x10-0x24) is answered with a fixed 4KB size
		 * mask instead of storing 0xffffffff.
		 */
		if ((o >= 0x10 && o <= 0x24) &&
		    vei->vei.vei_data == 0xffffffff) {
			vei->vei.vei_data = 0xfffff000;
		}
		pci.pci_devices[d].pd_cfg_space[o / 4] = vei->vei.vei_data;
	} else {
		/*
		 * vei_dir == 1 : in instruction
		 *
		 * The guest read from the config space location determined by
		 * the current value in the address register.
		 */
		vei->vei.vei_data = pci.pci_devices[d].pd_cfg_space[o / 4];
	}
}
310