/*	$NetBSD: hypervisor.h,v 1.6 2017/02/10 23:26:23 palle Exp $	*/
/*	$OpenBSD: hypervisor.h,v 1.14 2011/06/26 17:23:46 kettenis Exp $	*/

/*
 * Copyright (c) 2008 Mark Kettenis
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#ifndef _HYPERVISOR_H_
#define _HYPERVISOR_H_

/*
 * UltraSPARC Hypervisor API.
 */

/*
 * FAST_TRAP function numbers
 */

#define FT_MMU_MAP_PERM_ADDR	0x25

/*
 * API versioning
 */

#ifndef _LOCORE
int64_t hv_api_get_version(uint64_t api_group,
	    uint64_t *major_number, uint64_t *minor_number);
#endif

/*
 * Domain services
 */

#ifndef _LOCORE
int64_t hv_mach_desc(paddr_t buffer, psize_t *length);
#endif

/*
 * CPU services
 */

#ifndef _LOCORE
void hv_cpu_yield(void);
int64_t hv_cpu_qconf(uint64_t queue, uint64_t base, uint64_t nentries);
#endif

#define CPU_MONDO_QUEUE		0x3c
#define DEVICE_MONDO_QUEUE	0x3d

#ifndef _LOCORE
int64_t hv_cpu_mondo_send(uint64_t ncpus, paddr_t cpulist, paddr_t data);
int64_t hv_cpu_myid(uint64_t *cpuid);
#endif

/*
 * MMU services
 */

#ifndef _LOCORE
int64_t hv_mmu_demap_page(vaddr_t vaddr, uint64_t context, uint64_t flags);
int64_t hv_mmu_demap_ctx(uint64_t context, uint64_t flags);
int64_t hv_mmu_demap_all(uint64_t flags);
int64_t hv_mmu_map_perm_addr(vaddr_t vaddr, uint64_t tte, uint64_t flags);
int64_t hv_mmu_unmap_perm_addr(vaddr_t vaddr, uint64_t flags);
int64_t hv_mmu_map_addr(vaddr_t vaddr, uint64_t context, uint64_t tte,
	    uint64_t flags);
int64_t hv_mmu_unmap_addr(vaddr_t vaddr, uint64_t context, uint64_t flags);
#endif

#define MAP_DTLB	0x1
#define MAP_ITLB	0x2

#ifndef _LOCORE
struct tsb_desc {
	uint16_t	td_idxpgsz;
	uint16_t	td_assoc;
	uint32_t	td_size;
	uint32_t	td_ctxidx;
	uint32_t	td_pgsz;
	paddr_t		td_pa;
	uint64_t	td_reserved;
};

struct mmufsa {
	uint64_t	ift;		/* instruction fault type */
	uint64_t	ifa;		/* instruction fault address */
	uint64_t	ifc;		/* instruction fault context */
	uint64_t	reserved1[5];	/* reserved */
	uint64_t	dft;		/* data fault type */
	uint64_t	dfa;		/* data fault address */
	uint64_t	dfc;		/* data fault context */
	uint64_t	reserved2[5];	/* reserved */
};

int64_t hv_mmu_tsb_ctx0(uint64_t ntsb, paddr_t tsbptr);
int64_t hv_mmu_tsb_ctxnon0(uint64_t ntsb, paddr_t tsbptr);
#endif

/*
 * Cache and memory services
 */

#ifndef _LOCORE
int64_t hv_mem_scrub(paddr_t raddr, psize_t length);
int64_t hv_mem_sync(paddr_t raddr, psize_t length);
#endif

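/*
 * Illustrative sketch only, not part of the hypervisor API declared in
 * this file: a minimal caller of hv_api_get_version() showing the
 * calling convention shared by these wrappers -- a signed status is
 * returned and results come back through pointer arguments.  The helper
 * name and the comparison against 0 (H_EOK, defined at the end of this
 * file) are assumptions made for the example.
 */
#ifndef _LOCORE
static inline int
hv_example_have_api_group(uint64_t api_group)
{
	uint64_t major, minor;

	/* A non-zero status means the group is unsupported or the call failed. */
	return hv_api_get_version(api_group, &major, &minor) == 0 /* H_EOK */;
}
#endif
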
/*
 * Device interrupt services
 */

#ifndef _LOCORE
int64_t hv_intr_devino_to_sysino(uint64_t devhandle, uint64_t devino,
	    uint64_t *sysino);
int64_t hv_intr_getenabled(uint64_t sysino, uint64_t *intr_enabled);
int64_t hv_intr_setenabled(uint64_t sysino, uint64_t intr_enabled);
int64_t hv_intr_getstate(uint64_t sysino, uint64_t *intr_state);
int64_t hv_intr_setstate(uint64_t sysino, uint64_t intr_state);
int64_t hv_intr_gettarget(uint64_t sysino, uint64_t *cpuid);
int64_t hv_intr_settarget(uint64_t sysino, uint64_t cpuid);
#endif

#define INTR_DISABLED	0
#define INTR_ENABLED	1

#define INTR_IDLE	0
#define INTR_RECEIVED	1
#define INTR_DELIVERED	2

#ifndef _LOCORE
int64_t hv_vintr_getcookie(uint64_t devhandle, uint64_t devino,
	    uint64_t *cookie_value);
int64_t hv_vintr_setcookie(uint64_t devhandle, uint64_t devino,
	    uint64_t cookie_value);
int64_t hv_vintr_getenabled(uint64_t devhandle, uint64_t devino,
	    uint64_t *intr_enabled);
int64_t hv_vintr_setenabled(uint64_t devhandle, uint64_t devino,
	    uint64_t intr_enabled);
int64_t hv_vintr_getstate(uint64_t devhandle, uint64_t devino,
	    uint64_t *intr_state);
int64_t hv_vintr_setstate(uint64_t devhandle, uint64_t devino,
	    uint64_t intr_state);
int64_t hv_vintr_gettarget(uint64_t devhandle, uint64_t devino,
	    uint64_t *cpuid);
int64_t hv_vintr_settarget(uint64_t devhandle, uint64_t devino,
	    uint64_t cpuid);
#endif

/*
 * Time of day services
 */

#ifndef _LOCORE
int64_t hv_tod_get(uint64_t *tod);
int64_t hv_tod_set(uint64_t tod);
#endif

/*
 * Console services
 */

#ifndef _LOCORE
int64_t hv_cons_getchar(int64_t *ch);
int64_t hv_cons_putchar(int64_t ch);
int64_t hv_api_putchar(int64_t ch);
#endif

#define CONS_BREAK	-1
#define CONS_HUP	-2

/*
 * Domain state services
 */

#ifndef _LOCORE
int64_t hv_soft_state_set(uint64_t software_state,
	    paddr_t software_description_ptr);
#endif

#define SIS_NORMAL	0x1
#define SIS_TRANSITION	0x2

/*
 * PCI I/O services
 */

#ifndef _LOCORE
int64_t hv_pci_iommu_map(uint64_t devhandle, uint64_t tsbid,
	    uint64_t nttes, uint64_t io_attributes, paddr_t io_page_list_p,
	    uint64_t *nttes_mapped);
int64_t hv_pci_iommu_demap(uint64_t devhandle, uint64_t tsbid,
	    uint64_t nttes, uint64_t *nttes_demapped);
int64_t hv_pci_iommu_getmap(uint64_t devhandle, uint64_t tsbid,
	    uint64_t *io_attributes, paddr_t *r_addr);
int64_t hv_pci_iommu_getbypass(uint64_t devhandle, paddr_t r_addr,
	    uint64_t io_attributes, uint64_t *io_addr);

int64_t hv_pci_config_get(uint64_t devhandle, uint64_t pci_device,
	    uint64_t pci_config_offset, uint64_t size,
	    uint64_t *error_flag, uint64_t *data);
int64_t hv_pci_config_put(uint64_t devhandle, uint64_t pci_device,
	    uint64_t pci_config_offset, uint64_t size, uint64_t data,
	    uint64_t *error_flag);
#endif

#define PCI_MAP_ATTR_READ	0x01	/* From memory */
#define PCI_MAP_ATTR_WRITE	0x02	/* To memory */

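/*
 * Illustrative sketch only, not part of the hypervisor API declared in
 * this file: a 32-bit PCI configuration space read built on
 * hv_pci_config_get().  The helper name, the all-ones fallback when the
 * access fails, and the interpretation of error_flag are assumptions
 * made for the example; 0 is H_EOK, defined at the end of this file.
 */
#ifndef _LOCORE
static inline int64_t
hv_example_pci_config_read32(uint64_t devhandle, uint64_t pci_device,
    uint64_t offset, uint64_t *data)
{
	uint64_t error_flag;
	int64_t err;

	err = hv_pci_config_get(devhandle, pci_device, offset, 4,
	    &error_flag, data);
	if (err == 0 /* H_EOK */ && error_flag != 0)
		*data = 0xffffffff;	/* failed access reads as all-ones */
	return err;
}
#endif
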
/*
 * PCI MSI services
 */

#ifndef _LOCORE
int64_t hv_pci_msiq_conf(uint64_t devhandle, uint64_t msiqid,
	    uint64_t r_addr, uint64_t nentries);
int64_t hv_pci_msiq_info(uint64_t devhandle, uint64_t msiqid,
	    uint64_t *r_addr, uint64_t *nentries);

int64_t hv_pci_msiq_getvalid(uint64_t devhandle, uint64_t msiqid,
	    uint64_t *msiqvalid);
int64_t hv_pci_msiq_setvalid(uint64_t devhandle, uint64_t msiqid,
	    uint64_t msiqvalid);
#endif

#define PCI_MSIQ_INVALID	0
#define PCI_MSIQ_VALID		1

#ifndef _LOCORE
int64_t hv_pci_msiq_getstate(uint64_t devhandle, uint64_t msiqid,
	    uint64_t *msiqstate);
int64_t hv_pci_msiq_setstate(uint64_t devhandle, uint64_t msiqid,
	    uint64_t msiqstate);
#endif

#define PCI_MSIQSTATE_IDLE	0
#define PCI_MSIQSTATE_ERROR	1

#ifndef _LOCORE
int64_t hv_pci_msiq_gethead(uint64_t devhandle, uint64_t msiqid,
	    uint64_t *msiqhead);
int64_t hv_pci_msiq_sethead(uint64_t devhandle, uint64_t msiqid,
	    uint64_t msiqhead);
int64_t hv_pci_msiq_gettail(uint64_t devhandle, uint64_t msiqid,
	    uint64_t *msiqtail);

int64_t hv_pci_msi_getvalid(uint64_t devhandle, uint64_t msinum,
	    uint64_t *msivalidstate);
int64_t hv_pci_msi_setvalid(uint64_t devhandle, uint64_t msinum,
	    uint64_t msivalidstate);
#endif

#define PCI_MSI_INVALID		0
#define PCI_MSI_VALID		1

#ifndef _LOCORE
int64_t hv_pci_msi_getmsiq(uint64_t devhandle, uint64_t msinum,
	    uint64_t *msiqid);
int64_t hv_pci_msi_setmsiq(uint64_t devhandle, uint64_t msinum,
	    uint64_t msitype, uint64_t msiqid);

int64_t hv_pci_msi_getstate(uint64_t devhandle, uint64_t msinum,
	    uint64_t *msistate);
int64_t hv_pci_msi_setstate(uint64_t devhandle, uint64_t msinum,
	    uint64_t msistate);
#endif

#define PCI_MSISTATE_IDLE	0
#define PCI_MSISTATE_DELIVERED	1

#ifndef _LOCORE
int64_t hv_pci_msg_getmsiq(uint64_t devhandle, uint64_t msg,
	    uint64_t *msiqid);
int64_t hv_pci_msg_setmsiq(uint64_t devhandle, uint64_t msg,
	    uint64_t msiqid);

int64_t hv_pci_msg_getvalid(uint64_t devhandle, uint64_t msg,
	    uint64_t *msgvalidstate);
int64_t hv_pci_msg_setvalid(uint64_t devhandle, uint64_t msg,
	    uint64_t msgvalidstate);
#endif

#define PCIE_MSG_INVALID	0
#define PCIE_MSG_VALID		1

#define PCIE_PME_MSG		0x18
#define PCIE_PME_ACK_MSG	0x1b
#define PCIE_CORR_MSG		0x30
#define PCIE_NONFATAL_MSG	0x31
#define PCIE_FATAL_MSG		0x32

/*
 * Logical Domain Channel services
 */

#ifndef _LOCORE
int64_t hv_ldc_tx_qconf(uint64_t ldc_id, paddr_t base_raddr,
	    uint64_t nentries);
int64_t hv_ldc_tx_qinfo(uint64_t ldc_id, paddr_t *base_raddr,
	    uint64_t *nentries);
int64_t hv_ldc_tx_get_state(uint64_t ldc_id, uint64_t *head_offset,
	    uint64_t *tail_offset, uint64_t *channel_state);
int64_t hv_ldc_tx_set_qtail(uint64_t ldc_id, uint64_t tail_offset);
int64_t hv_ldc_rx_qconf(uint64_t ldc_id, paddr_t base_raddr,
	    uint64_t nentries);
int64_t hv_ldc_rx_qinfo(uint64_t ldc_id, paddr_t *base_raddr,
	    uint64_t *nentries);
int64_t hv_ldc_rx_get_state(uint64_t ldc_id, uint64_t *head_offset,
	    uint64_t *tail_offset, uint64_t *channel_state);
int64_t hv_ldc_rx_set_qhead(uint64_t ldc_id, uint64_t head_offset);
#endif

#define LDC_CHANNEL_DOWN	0
#define LDC_CHANNEL_UP		1
#define LDC_CHANNEL_RESET	2

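/*
 * Illustrative sketch only, not part of the hypervisor API declared in
 * this file: acknowledge everything currently queued on an LDC receive
 * queue by advancing the head to the tail reported by
 * hv_ldc_rx_get_state().  The helper name and the early return when the
 * channel is not up are assumptions made for the example; 0 is H_EOK,
 * defined at the end of this file.
 */
#ifndef _LOCORE
static inline int64_t
hv_example_ldc_rx_flush(uint64_t ldc_id)
{
	uint64_t head, tail, state;
	int64_t err;

	err = hv_ldc_rx_get_state(ldc_id, &head, &tail, &state);
	if (err != 0 /* H_EOK */ || state != LDC_CHANNEL_UP)
		return err;
	return hv_ldc_rx_set_qhead(ldc_id, tail);
}
#endif
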
#ifndef _LOCORE
int64_t hv_ldc_set_map_table(uint64_t ldc_id, paddr_t base_raddr,
	    uint64_t nentries);
int64_t hv_ldc_get_map_table(uint64_t ldc_id, paddr_t *base_raddr,
	    uint64_t *nentries);
int64_t hv_ldc_copy(uint64_t ldc_id, uint64_t flags, uint64_t cookie,
	    paddr_t raddr, psize_t length, psize_t *ret_length);
#endif

#define LDC_COPY_IN	0
#define LDC_COPY_OUT	1

#ifndef _LOCORE
int64_t hv_ldc_mapin(uint64_t ldc_id, uint64_t cookie, paddr_t *raddr,
	    uint64_t *perms);
int64_t hv_ldc_unmap(paddr_t raddr, uint64_t *perms);
#endif

/*
 * Cryptographic services
 */

#ifndef _LOCORE
int64_t hv_rng_get_diag_control(void);
int64_t hv_rng_ctl_read(paddr_t raddr, uint64_t *state, uint64_t *delta);
int64_t hv_rng_ctl_write(paddr_t raddr, uint64_t state, uint64_t timeout,
	    uint64_t *delta);
#endif

#define RNG_STATE_UNCONFIGURED	0
#define RNG_STATE_CONFIGURED	1
#define RNG_STATE_HEALTHCHECK	2
#define RNG_STATE_ERROR		3

#ifndef _LOCORE
int64_t hv_rng_data_read_diag(paddr_t raddr, uint64_t size, uint64_t *delta);
int64_t hv_rng_data_read(paddr_t raddr, uint64_t *delta);
#endif

/*
 * Error codes
 */

#define H_EOK		0
#define H_ENOCPU	1
#define H_ENORADDR	2
#define H_ENOINTR	3
#define H_EBADPGSZ	4
#define H_EBADTSB	5
#define H_EINVAL	6
#define H_EBADTRAP	7
#define H_EBADALIGN	8
#define H_EWOULDBLOCK	9
#define H_ENOACCESS	10
#define H_EIO		11
#define H_ECPUERROR	12
#define H_ENOTSUPPORTED	13
#define H_ENOMAP	14
#define H_ETOOMANY	15
#define H_ECHANNEL	16

#endif /* _HYPERVISOR_H_ */