1 /* $OpenBSD: pci.c,v 1.13 2017/01/21 12:45:41 mlarkin Exp $ */ 2 3 /* 4 * Copyright (c) 2015 Mike Larkin <mlarkin@openbsd.org> 5 * 6 * Permission to use, copy, modify, and distribute this software for any 7 * purpose with or without fee is hereby granted, provided that the above 8 * copyright notice and this permission notice appear in all copies. 9 * 10 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES 11 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF 12 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR 13 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES 14 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN 15 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF 16 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 17 */ 18 19 #include <sys/types.h> 20 21 #include <dev/pci/pcireg.h> 22 #include <dev/pci/pcidevs.h> 23 #include <dev/pv/virtioreg.h> 24 #include <machine/vmmvar.h> 25 26 #include <string.h> 27 #include "vmd.h" 28 #include "pci.h" 29 30 struct pci pci; 31 32 extern char *__progname; 33 34 /* PIC IRQs, assigned to devices in order */ 35 const uint8_t pci_pic_irqs[PCI_MAX_PIC_IRQS] = {3, 5, 7, 9, 10, 11, 14, 15}; 36 37 /* 38 * pci_add_bar 39 * 40 * Adds a BAR for the PCI device 'id'. On access, 'barfn' will be 41 * called, and passed 'cookie' as an identifier. 42 * 43 * BARs are fixed size, meaning all I/O BARs requested have the 44 * same size and all MMIO BARs have the same size. 45 * 46 * Parameters: 47 * id: PCI device to add the BAR to (local count, eg if id == 4, 48 * this BAR is to be added to the VM's 5th PCI device) 49 * type: type of the BAR to add (PCI_MAPREG_TYPE_xxx) 50 * barfn: callback function invoked on BAR access 51 * cookie: cookie passed to barfn on access 52 * 53 * Returns 0 if the BAR was added successfully, 1 otherwise. 54 */ 55 int 56 pci_add_bar(uint8_t id, uint32_t type, void *barfn, void *cookie) 57 { 58 uint8_t bar_reg_idx, bar_ct; 59 60 /* Check id */ 61 if (id >= pci.pci_dev_ct) 62 return (1); 63 64 /* Can only add PCI_MAX_BARS BARs to any device */ 65 bar_ct = pci.pci_devices[id].pd_bar_ct; 66 if (bar_ct >= PCI_MAX_BARS) 67 return (1); 68 69 /* Compute BAR address and add */ 70 bar_reg_idx = (PCI_MAPREG_START + (bar_ct * 4)) / 4; 71 if (type == PCI_MAPREG_TYPE_MEM) { 72 if (pci.pci_next_mmio_bar >= VMM_PCI_MMIO_BAR_END) 73 return (1); 74 75 pci.pci_devices[id].pd_cfg_space[bar_reg_idx] = 76 PCI_MAPREG_MEM_ADDR(pci.pci_next_mmio_bar); 77 pci.pci_next_mmio_bar += VMM_PCI_MMIO_BAR_SIZE; 78 pci.pci_devices[id].pd_barfunc[bar_ct] = barfn; 79 pci.pci_devices[id].pd_bar_cookie[bar_ct] = cookie; 80 pci.pci_devices[id].pd_bartype[bar_ct] = PCI_BAR_TYPE_MMIO; 81 pci.pci_devices[id].pd_barsize[bar_ct] = VMM_PCI_MMIO_BAR_SIZE; 82 pci.pci_devices[id].pd_bar_ct++; 83 } else if (type == PCI_MAPREG_TYPE_IO) { 84 if (pci.pci_next_io_bar >= VMM_PCI_IO_BAR_END) 85 return (1); 86 87 pci.pci_devices[id].pd_cfg_space[bar_reg_idx] = 88 PCI_MAPREG_IO_ADDR(pci.pci_next_io_bar) | 89 PCI_MAPREG_TYPE_IO; 90 pci.pci_next_io_bar += VMM_PCI_IO_BAR_SIZE; 91 pci.pci_devices[id].pd_barfunc[bar_ct] = barfn; 92 pci.pci_devices[id].pd_bar_cookie[bar_ct] = cookie; 93 dprintf("%s: adding pci bar cookie for dev %d bar %d = %p", 94 __progname, id, bar_ct, cookie); 95 pci.pci_devices[id].pd_bartype[bar_ct] = PCI_BAR_TYPE_IO; 96 pci.pci_devices[id].pd_barsize[bar_ct] = VMM_PCI_IO_BAR_SIZE; 97 pci.pci_devices[id].pd_bar_ct++; 98 } 99 100 return (0); 101 } 102 103 /* 104 * pci_get_dev_irq 105 * 106 * Returns the IRQ for the specified PCI device 107 * 108 * Parameters: 109 * id: PCI device id to return IRQ for 110 * 111 * Return values: 112 * The IRQ for the device, or 0xff if no device IRQ assigned 113 */ 114 uint8_t 115 pci_get_dev_irq(uint8_t id) 116 { 117 if (pci.pci_devices[id].pd_int) 118 return pci.pci_devices[id].pd_irq; 119 else 120 return 0xFF; 121 } 122 123 /* 124 * pci_add_device 125 * 126 * Adds a PCI device to the guest VM defined by the supplied parameters. 127 * 128 * Parameters: 129 * id: the new PCI device ID (0 .. PCI_CONFIG_MAX_DEV) 130 * vid: PCI VID of the new device 131 * pid: PCI PID of the new device 132 * class: PCI 'class' of the new device 133 * subclass: PCI 'subclass' of the new device 134 * subsys_vid: subsystem VID of the new device 135 * subsys_id: subsystem ID of the new device 136 * irq_needed: 1 if an IRQ should be assigned to this PCI device, 0 otherwise 137 * csfunc: PCI config space callback function when the guest VM accesses 138 * CS of this PCI device 139 * 140 * Return values: 141 * 0: the PCI device was added successfully. The PCI device ID is in 'id'. 142 * 1: the PCI device addition failed. 143 */ 144 int 145 pci_add_device(uint8_t *id, uint16_t vid, uint16_t pid, uint8_t class, 146 uint8_t subclass, uint16_t subsys_vid, uint16_t subsys_id, 147 uint8_t irq_needed, pci_cs_fn_t csfunc) 148 { 149 /* Exceeded max devices? */ 150 if (pci.pci_dev_ct >= PCI_CONFIG_MAX_DEV) 151 return (1); 152 153 /* Exceeded max IRQs? */ 154 /* XXX we could share IRQs ... */ 155 if (pci.pci_next_pic_irq >= PCI_MAX_PIC_IRQS && irq_needed) 156 return (1); 157 158 *id = pci.pci_dev_ct; 159 160 pci.pci_devices[*id].pd_vid = vid; 161 pci.pci_devices[*id].pd_did = pid; 162 pci.pci_devices[*id].pd_class = class; 163 pci.pci_devices[*id].pd_subclass = subclass; 164 pci.pci_devices[*id].pd_subsys_vid = subsys_vid; 165 pci.pci_devices[*id].pd_subsys_id = subsys_id; 166 167 pci.pci_devices[*id].pd_csfunc = csfunc; 168 169 if (irq_needed) { 170 pci.pci_devices[*id].pd_irq = 171 pci_pic_irqs[pci.pci_next_pic_irq]; 172 pci.pci_devices[*id].pd_int = 1; 173 pci.pci_next_pic_irq++; 174 dprintf("assigned irq %d to pci dev %d", 175 pci.pci_devices[*id].pd_irq, *id); 176 } 177 178 pci.pci_dev_ct ++; 179 180 return (0); 181 } 182 183 /* 184 * pci_init 185 * 186 * Initializes the PCI subsystem for the VM by adding a PCI host bridge 187 * as the first PCI device. 188 */ 189 void 190 pci_init(void) 191 { 192 uint8_t id; 193 194 memset(&pci, 0, sizeof(pci)); 195 pci.pci_next_mmio_bar = VMM_PCI_MMIO_BAR_BASE; 196 pci.pci_next_io_bar = VMM_PCI_IO_BAR_BASE; 197 198 if (pci_add_device(&id, PCI_VENDOR_OPENBSD, PCI_PRODUCT_OPENBSD_PCHB, 199 PCI_CLASS_BRIDGE, PCI_SUBCLASS_BRIDGE_HOST, 200 PCI_VENDOR_OPENBSD, 0, 0, NULL)) { 201 log_warnx("%s: can't add PCI host bridge", __progname); 202 return; 203 } 204 } 205 206 void 207 pci_handle_address_reg(struct vm_run_params *vrp) 208 { 209 union vm_exit *vei = vrp->vrp_exit; 210 211 /* 212 * vei_dir == VEI_DIR_OUT : out instruction 213 * 214 * The guest wrote to the address register. 215 */ 216 if (vei->vei.vei_dir == VEI_DIR_OUT) { 217 pci.pci_addr_reg = vei->vei.vei_data; 218 } else { 219 /* 220 * vei_dir == VEI_DIR_IN : in instruction 221 * 222 * The guest read the address register 223 */ 224 vei->vei.vei_data = pci.pci_addr_reg; 225 } 226 } 227 228 uint8_t 229 pci_handle_io(struct vm_run_params *vrp) 230 { 231 int i, j, k, l; 232 uint16_t reg, b_hi, b_lo; 233 pci_iobar_fn_t fn; 234 union vm_exit *vei = vrp->vrp_exit; 235 uint8_t intr, dir; 236 237 k = -1; 238 l = -1; 239 reg = vei->vei.vei_port; 240 dir = vei->vei.vei_dir; 241 intr = 0xFF; 242 243 for (i = 0 ; i < pci.pci_dev_ct ; i++) { 244 for (j = 0 ; j < pci.pci_devices[i].pd_bar_ct; j++) { 245 b_lo = PCI_MAPREG_IO_ADDR(pci.pci_devices[i].pd_bar[j]); 246 b_hi = b_lo + VMM_PCI_IO_BAR_SIZE; 247 if (reg >= b_lo && reg < b_hi) { 248 if (pci.pci_devices[i].pd_barfunc[j]) { 249 k = j; 250 l = i; 251 } 252 } 253 } 254 } 255 256 if (k >= 0 && l >= 0) { 257 fn = (pci_iobar_fn_t)pci.pci_devices[l].pd_barfunc[k]; 258 if (fn(vei->vei.vei_dir, reg - 259 PCI_MAPREG_IO_ADDR(pci.pci_devices[l].pd_bar[k]), 260 &vei->vei.vei_data, &intr, 261 pci.pci_devices[l].pd_bar_cookie[k])) { 262 log_warnx("%s: pci i/o access function failed", 263 __progname); 264 } 265 } else { 266 log_warnx("%s: no pci i/o function for reg 0x%llx", 267 __progname, (uint64_t)reg); 268 /* Reads from undefined ports return 0xFF */ 269 if (dir == 1) 270 vei->vei.vei_data = 0xFFFFFFFF; 271 } 272 273 if (intr != 0xFF) { 274 intr = pci.pci_devices[l].pd_irq; 275 } 276 277 return (intr); 278 } 279 280 void 281 pci_handle_data_reg(struct vm_run_params *vrp) 282 { 283 union vm_exit *vei = vrp->vrp_exit; 284 uint8_t b, d, f, o, baridx; 285 int ret; 286 pci_cs_fn_t csfunc; 287 288 /* abort if the address register is wack */ 289 if (!(pci.pci_addr_reg & PCI_MODE1_ENABLE)) { 290 /* if read, return FFs */ 291 if (vei->vei.vei_dir == VEI_DIR_IN) 292 vei->vei.vei_data = 0xffffffff; 293 log_warnx("invalid address register during pci read: " 294 "0x%llx", (uint64_t)pci.pci_addr_reg); 295 return; 296 } 297 298 b = (pci.pci_addr_reg >> 16) & 0xff; 299 d = (pci.pci_addr_reg >> 11) & 0x1f; 300 f = (pci.pci_addr_reg >> 8) & 0x7; 301 o = (pci.pci_addr_reg & 0xfc); 302 303 csfunc = pci.pci_devices[d].pd_csfunc; 304 if (csfunc != NULL) { 305 ret = csfunc(vei->vei.vei_dir, (o / 4), &vei->vei.vei_data); 306 if (ret) 307 log_warnx("cfg space access function failed for " 308 "pci device %d", d); 309 return; 310 } 311 312 /* No config space function, fallback to default simple r/w impl. */ 313 314 /* 315 * vei_dir == VEI_DIR_OUT : out instruction 316 * 317 * The guest wrote to the config space location denoted by the current 318 * value in the address register. 319 */ 320 if (vei->vei.vei_dir == VEI_DIR_OUT) { 321 if ((o >= 0x10 && o <= 0x24) && 322 vei->vei.vei_data == 0xffffffff) { 323 /* 324 * Compute BAR index: 325 * o = 0x10 -> baridx = 1 326 * o = 0x14 -> baridx = 2 327 * o = 0x18 -> baridx = 3 328 * o = 0x1c -> baridx = 4 329 * o = 0x20 -> baridx = 5 330 * o = 0x24 -> baridx = 6 331 */ 332 baridx = (o / 4) - 3; 333 if (pci.pci_devices[d].pd_bar_ct >= baridx) 334 vei->vei.vei_data = 0xfffff000; 335 else 336 vei->vei.vei_data = 0; 337 } 338 pci.pci_devices[d].pd_cfg_space[o / 4] = vei->vei.vei_data; 339 } else { 340 /* 341 * vei_dir == VEI_DIR_IN : in instruction 342 * 343 * The guest read from the config space location determined by 344 * the current value in the address register. 345 */ 346 vei->vei.vei_data = pci.pci_devices[d].pd_cfg_space[o / 4]; 347 } 348 } 349