/*	$OpenBSD: vmt.c,v 1.23 2021/02/11 11:57:32 mestre Exp $ */

/*
 * Copyright (c) 2007 David Crawshaw <david@zentus.com>
 * Copyright (c) 2008 David Gwynne <dlg@openbsd.org>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#if !defined(__i386__) && !defined(__amd64__)
#error vmt(4) is only supported on i386 and amd64
#endif

/*
 * Protocol reverse engineered by Ken Kato:
 * https://sites.google.com/site/chitchatvmback/backdoor
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/timeout.h>
#include <sys/signalvar.h>
#include <sys/syslog.h>
#include <sys/proc.h>
#include <sys/socket.h>
#include <sys/ioctl.h>
#include <sys/mount.h>
#include <sys/task.h>
#include <sys/sensors.h>

#include <net/if.h>
#include <net/if_dl.h>
#include <net/if_var.h>
#include <net/if_types.h>
#include <net/rtable.h>
#include <netinet/in.h>
#include <netinet/if_ether.h>

#include <dev/pv/pvvar.h>

/* "The" magic number, always occupies the EAX register. */
#define VM_MAGIC			0x564D5868

/* Port numbers, passed on EDX.LOW. */
#define VM_PORT_CMD			0x5658
#define VM_PORT_RPC			0x5659

/* Commands, passed on ECX.LOW. */
#define VM_CMD_GET_SPEED		0x01
#define VM_CMD_APM			0x02
#define VM_CMD_GET_MOUSEPOS		0x04
#define VM_CMD_SET_MOUSEPOS		0x05
#define VM_CMD_GET_CLIPBOARD_LEN	0x06
#define VM_CMD_GET_CLIPBOARD		0x07
#define VM_CMD_SET_CLIPBOARD_LEN	0x08
#define VM_CMD_SET_CLIPBOARD		0x09
#define VM_CMD_GET_VERSION		0x0a
#define  VM_VERSION_UNMANAGED		0x7fffffff
#define VM_CMD_GET_DEVINFO		0x0b
#define VM_CMD_DEV_ADDREMOVE		0x0c
#define VM_CMD_GET_GUI_OPTIONS		0x0d
#define VM_CMD_SET_GUI_OPTIONS		0x0e
#define VM_CMD_GET_SCREEN_SIZE		0x0f
#define VM_CMD_GET_HWVER		0x11
#define VM_CMD_POPUP_OSNOTFOUND		0x12
#define VM_CMD_GET_BIOS_UUID		0x13
#define VM_CMD_GET_MEM_SIZE		0x14
/*#define VM_CMD_GET_TIME		0x17 */	/* deprecated */
#define VM_CMD_RPC			0x1e
#define VM_CMD_GET_TIME_FULL		0x2e

/* RPC sub-commands, passed on ECX.HIGH. */
#define VM_RPC_OPEN			0x00
#define VM_RPC_SET_LENGTH		0x01
#define VM_RPC_SET_DATA			0x02
#define VM_RPC_GET_LENGTH		0x03
#define VM_RPC_GET_DATA			0x04
#define VM_RPC_GET_END			0x05
#define VM_RPC_CLOSE			0x06

/* RPC magic numbers, passed on EBX. */
#define VM_RPC_OPEN_RPCI	0x49435052UL /* with VM_RPC_OPEN. */
#define VM_RPC_OPEN_TCLO	0x4F4C4354UL /* with VM_RPC_OPEN. */
#define VM_RPC_ENH_DATA		0x00010000UL /* with enhanced RPC data calls. */

#define VM_RPC_FLAG_COOKIE	0x80000000UL

/* RPC reply flags */
#define VM_RPC_REPLY_SUCCESS	0x0001
#define VM_RPC_REPLY_DORECV	0x0002	/* incoming message available */
#define VM_RPC_REPLY_CLOSED	0x0004	/* RPC channel is closed */
#define VM_RPC_REPLY_UNSENT	0x0008	/* incoming message was removed? */
#define VM_RPC_REPLY_CHECKPOINT	0x0010	/* checkpoint occurred -> retry */
#define VM_RPC_REPLY_POWEROFF	0x0020	/* underlying device is powering off */
#define VM_RPC_REPLY_TIMEOUT	0x0040
#define VM_RPC_REPLY_HB		0x0080	/* high-bandwidth tx/rx available */

/* VM state change IDs */
#define VM_STATE_CHANGE_HALT	1
#define VM_STATE_CHANGE_REBOOT	2
#define VM_STATE_CHANGE_POWERON	3
#define VM_STATE_CHANGE_RESUME	4
#define VM_STATE_CHANGE_SUSPEND	5

/* VM guest info keys */
#define VM_GUEST_INFO_DNS_NAME		1
#define VM_GUEST_INFO_IP_ADDRESS	2
#define VM_GUEST_INFO_DISK_FREE_SPACE	3
#define VM_GUEST_INFO_BUILD_NUMBER	4
#define VM_GUEST_INFO_OS_NAME_FULL	5
#define VM_GUEST_INFO_OS_NAME		6
#define VM_GUEST_INFO_UPTIME		7
#define VM_GUEST_INFO_MEMORY		8
#define VM_GUEST_INFO_IP_ADDRESS_V2	9
#define VM_GUEST_INFO_IP_ADDRESS_V3	10

/* RPC responses */
#define VM_RPC_REPLY_OK			"OK "
#define VM_RPC_RESET_REPLY		"OK ATR toolbox"
#define VM_RPC_REPLY_ERROR		"ERROR Unknown command"
#define VM_RPC_REPLY_ERROR_IP_ADDR	"ERROR Unable to find guest IP address"

/* VM backup error codes */
#define VM_BACKUP_SUCCESS	0
#define VM_BACKUP_SYNC_ERROR	3
#define VM_BACKUP_REMOTE_ABORT	4

#define VM_BACKUP_TIMEOUT	30	/* seconds */

/* NIC/IP address stuff */
#define VM_NICINFO_VERSION	3

#define VM_NICINFO_IP_LEN	64
#define VM_NICINFO_MAX_NICS	16
#define VM_NICINFO_MAX_ADDRS	2048
#define VM_NICINFO_MAC_LEN	20

#define VM_NICINFO_ADDR_IPV4	1
#define VM_NICINFO_ADDR_IPV6	2

struct vm_nicinfo_addr_v4 {
	uint32_t	v4_addr_type;
	uint32_t	v4_addr_len;
	struct in_addr	v4_addr;
	uint32_t	v4_prefix_len;
	uint32_t	v4_origin;
	uint32_t	v4_status;
};

struct vm_nicinfo_addr_v6 {
	uint32_t	v6_addr_type;
	uint32_t	v6_addr_len;
	struct in6_addr	v6_addr;
	uint32_t	v6_prefix_len;
	uint32_t	v6_origin;
	uint32_t	v6_status;
};

struct vm_nicinfo_nic {
	uint32_t	ni_mac_len;
	char		ni_mac[VM_NICINFO_MAC_LEN];
	uint32_t	ni_num_addrs;
};

struct vm_nicinfo_nic_nomac {
	uint32_t	nn_mac_len;
	uint32_t	nn_num_addrs;
};

struct vm_nicinfo_nic_post {
	uint32_t	np_dns_config;
	uint32_t	np_wins_config;
	uint32_t	np_dhcpv4_config;
	uint32_t	np_dhcpv6_config;
};

struct vm_nicinfo_nic_list {
	uint32_t	nl_version;
	uint32_t	nl_nic_list;
	uint32_t	nl_num_nics;
};

struct vm_nicinfo_nic_list_post {
	uint32_t	nl_num_routes;
	uint32_t	nl_dns_config;
	uint32_t	nl_wins_config;
	uint32_t	nl_dhcpv4_config;
	uint32_t	nl_dhcpv6_config;
};

#define VM_NICINFO_CMD		"SetGuestInfo 10 "

/* A register. */
union vm_reg {
	struct {
		uint16_t low;
		uint16_t high;
	} part;
	uint32_t word;
#ifdef __amd64__
	struct {
		uint32_t low;
		uint32_t high;
	} words;
	uint64_t quad;
#endif
} __packed;

/* A register frame. */
struct vm_backdoor {
	union vm_reg eax;
	union vm_reg ebx;
	union vm_reg ecx;
	union vm_reg edx;
	union vm_reg esi;
	union vm_reg edi;
	union vm_reg ebp;
} __packed;

/* RPC context. */
struct vm_rpc {
	uint16_t channel;
	uint32_t cookie1;
	uint32_t cookie2;
};

struct vmt_softc {
	struct device		sc_dev;

	struct vm_rpc		sc_tclo_rpc;
	char			*sc_rpc_buf;
	int			sc_rpc_error;
	int			sc_tclo_ping;
	int			sc_set_guest_os;
	int			sc_quiesce;
	struct task		sc_quiesce_task;
	struct task		sc_nicinfo_task;
#define VMT_RPC_BUFLEN		4096

	struct timeout		sc_tick;
	struct timeout		sc_tclo_tick;
	struct ksensordev	sc_sensordev;
	struct ksensor		sc_sensor;

	char			sc_hostname[MAXHOSTNAMELEN];
	size_t			sc_nic_info_size;
	char			*sc_nic_info;
};

#ifdef VMT_DEBUG
#define DPRINTF(_arg...)	printf(_arg)
#else
#define DPRINTF(_arg...)	do {} while(0)
#endif
#define DEVNAME(_s)		((_s)->sc_dev.dv_xname)

void	vm_cmd(struct vm_backdoor *);
void	vm_ins(struct vm_backdoor *);
void	vm_outs(struct vm_backdoor *);

/* Functions for communicating with the VM Host. */
int	vm_rpc_open(struct vm_rpc *, uint32_t);
int	vm_rpc_close(struct vm_rpc *);
int	vm_rpc_send(const struct vm_rpc *, const uint8_t *, uint32_t);
int	vm_rpc_send_str(const struct vm_rpc *, const uint8_t *);
int	vm_rpc_get_length(const struct vm_rpc *, uint32_t *, uint16_t *);
int	vm_rpc_get_data(const struct vm_rpc *, char *, uint32_t, uint16_t);
int	vm_rpc_send_rpci_tx_buf(struct vmt_softc *, const uint8_t *, uint32_t);
int	vm_rpc_send_rpci_tx(struct vmt_softc *, const char *, ...)
	    __attribute__((__format__(__kprintf__,2,3)));
int	vm_rpci_response_successful(struct vmt_softc *);

int	vmt_kvop(void *, int, char *, char *, size_t);

void	vmt_probe_cmd(struct vm_backdoor *, uint16_t);
void	vmt_tclo_state_change_success(struct vmt_softc *, int, char);
void	vmt_do_reboot(struct vmt_softc *);
void	vmt_do_shutdown(struct vmt_softc *);
void	vmt_shutdown(void *);

void	vmt_clear_guest_info(struct vmt_softc *);
void	vmt_update_guest_info(struct vmt_softc *);
void	vmt_update_guest_uptime(struct vmt_softc *);

void	vmt_tick_hook(struct device *self);
void	vmt_tick(void *);
void	vmt_resume(void);

int	vmt_match(struct device *, void *, void *);
void	vmt_attach(struct device *, struct device *, void *);
int	vmt_activate(struct device *, int);

void	vmt_tclo_tick(void *);
int	vmt_tclo_process(struct vmt_softc *, const char *);
void	vmt_tclo_reset(struct vmt_softc *);
void	vmt_tclo_ping(struct vmt_softc *);
void	vmt_tclo_halt(struct vmt_softc *);
void	vmt_tclo_reboot(struct vmt_softc *);
void	vmt_tclo_poweron(struct vmt_softc *);
void	vmt_tclo_suspend(struct vmt_softc *);
void	vmt_tclo_resume(struct vmt_softc *);
void	vmt_tclo_capreg(struct vmt_softc *);
void	vmt_tclo_broadcastip(struct vmt_softc *);

void	vmt_set_backup_status(struct vmt_softc *, const char *, int,
	    const char *);
void	vmt_quiesce_task(void *);
void	vmt_quiesce_done_task(void *);
void	vmt_tclo_abortbackup(struct vmt_softc *);
void	vmt_tclo_startbackup(struct vmt_softc *);
void	vmt_tclo_backupdone(struct vmt_softc *);

size_t	vmt_xdr_ifaddr(struct ifaddr *, char *);
size_t	vmt_xdr_nic_entry(struct ifnet *, char *);
size_t	vmt_xdr_nic_info(char *);
void	vmt_nicinfo_task(void *);

int	vmt_probe(void);

struct vmt_tclo_rpc {
	const char	*name;
	void		(*cb)(struct vmt_softc *);
} vmt_tclo_rpc[] = {
	/* Keep sorted by name (case-sensitive) */
	{ "Capabilities_Register",	vmt_tclo_capreg },
	{ "OS_Halt",			vmt_tclo_halt },
	{ "OS_PowerOn",			vmt_tclo_poweron },
	{ "OS_Reboot",			vmt_tclo_reboot },
	{ "OS_Resume",			vmt_tclo_resume },
	{ "OS_Suspend",			vmt_tclo_suspend },
	{ "Set_Option broadcastIP 1",	vmt_tclo_broadcastip },
	{ "ping",			vmt_tclo_ping },
	{ "reset",			vmt_tclo_reset },
	{ "vmbackup.abort",		vmt_tclo_abortbackup },
	{ "vmbackup.snapshotDone",	vmt_tclo_backupdone },
	{ "vmbackup.start 1",		vmt_tclo_startbackup },
	{ NULL },
#if 0
	/* Various unsupported commands */
	{ "Set_Option autohide 0" },
	{ "Set_Option copypaste 1" },
	{ "Set_Option enableDnD 1" },
	{ "Set_Option enableMessageBusTunnel 0" },
	{ "Set_Option linkRootHgfsShare 0" },
	{ "Set_Option mapRootHgfsShare 0" },
	{ "Set_Option synctime 1" },
	{ "Set_Option synctime.period 0" },
	{ "Set_Option time.synchronize.tools.enable 1" },
	{ "Set_Option time.synchronize.tools.percentCorrection 0" },
	{ "Set_Option time.synchronize.tools.slewCorrection 1" },
	{ "Set_Option time.synchronize.tools.startup 1" },
	{ "Set_Option toolScripts.afterPowerOn 1" },
	{ "Set_Option toolScripts.afterResume 1" },
	{ "Set_Option toolScripts.beforePowerOff 1" },
	{ "Set_Option toolScripts.beforeSuspend 1" },
	{ "Time_Synchronize 0" },
	{ "Vix_1_Relayed_Command \"38cdcae40e075d66\"" },
#endif
};

struct cfattach vmt_ca = {
	sizeof(struct vmt_softc),
	vmt_match,
	vmt_attach,
	NULL,
	vmt_activate
};

struct cfdriver vmt_cd = {
	NULL,
	"vmt",
	DV_DULL
};

extern char hostname[MAXHOSTNAMELEN];

void
vmt_probe_cmd(struct vm_backdoor *frame, uint16_t cmd)
{
	bzero(frame, sizeof(*frame));

	(frame->eax).word = VM_MAGIC;
	(frame->ebx).word = ~VM_MAGIC;
	(frame->ecx).part.low = cmd;
	(frame->ecx).part.high = 0xffff;
	(frame->edx).part.low = VM_PORT_CMD;
	(frame->edx).part.high = 0;

	vm_cmd(frame);
}

int
vmt_probe(void)
{
	struct vm_backdoor frame;

	vmt_probe_cmd(&frame, VM_CMD_GET_VERSION);
	if (frame.eax.word == 0xffffffff ||
	    frame.ebx.word != VM_MAGIC)
		return (0);

	vmt_probe_cmd(&frame, VM_CMD_GET_SPEED);
	if (frame.eax.word == VM_MAGIC)
		return (0);

	return (1);
}

int
vmt_match(struct device *parent, void *match, void *aux)
{
	struct pv_attach_args *pva = aux;
	struct pvbus_hv *hv = &pva->pva_hv[PVBUS_VMWARE];

	if (hv->hv_base == 0)
		return (0);
	if (!vmt_probe())
		return (0);

	return (1);
}

void
vmt_attach(struct device *parent, struct device *self, void *aux)
{
	struct vmt_softc *sc = (struct vmt_softc *)self;
	struct pv_attach_args *pva = aux;
	struct pvbus_hv *hv = &pva->pva_hv[PVBUS_VMWARE];

	printf("\n");
	sc->sc_rpc_buf = malloc(VMT_RPC_BUFLEN, M_DEVBUF, M_NOWAIT);
	if (sc->sc_rpc_buf == NULL) {
		printf("%s: unable to allocate buffer for RPC\n",
		    DEVNAME(sc));
		return;
	}

	if (vm_rpc_open(&sc->sc_tclo_rpc, VM_RPC_OPEN_TCLO) != 0) {
		printf("%s: failed to open backdoor RPC channel "
		    "(TCLO protocol)\n", DEVNAME(sc));
		goto free;
	}

	/* don't know if this is important at all yet */
	if (vm_rpc_send_rpci_tx(sc,
	    "tools.capability.hgfs_server toolbox 1") != 0) {
		printf(": failed to set HGFS server capability\n");
		goto free;
	}

	strlcpy(sc->sc_sensordev.xname, sc->sc_dev.dv_xname,
	    sizeof(sc->sc_sensordev.xname));

	sc->sc_sensor.type = SENSOR_TIMEDELTA;
	sc->sc_sensor.status = SENSOR_S_UNKNOWN;

	sensor_attach(&sc->sc_sensordev, &sc->sc_sensor);
	sensordev_install(&sc->sc_sensordev);

	config_mountroot(self, vmt_tick_hook);

	timeout_set(&sc->sc_tclo_tick, vmt_tclo_tick, sc);
	timeout_add_sec(&sc->sc_tclo_tick, 1);
	sc->sc_tclo_ping = 1;

	task_set(&sc->sc_nicinfo_task, vmt_nicinfo_task, sc);

	/* pvbus(4) key/value interface */
	hv->hv_kvop = vmt_kvop;
	hv->hv_arg = sc;

	return;

free:
	free(sc->sc_rpc_buf, M_DEVBUF, VMT_RPC_BUFLEN);
}

int
vmt_kvop(void *arg, int op, char *key, char *value, size_t valuelen)
{
	struct vmt_softc *sc = arg;
	char *buf = NULL, *ptr;
	size_t bufsz;
	int error = 0;

	bufsz = VMT_RPC_BUFLEN;
	buf = malloc(bufsz, M_TEMP, M_WAITOK | M_ZERO);

	switch (op) {
	case PVBUS_KVWRITE:
		if ((size_t)snprintf(buf, bufsz, "info-set %s %s",
		    key, value) >= bufsz) {
			DPRINTF("%s: write command too long", DEVNAME(sc));
			error = EINVAL;
			goto done;
		}
		break;
	case PVBUS_KVREAD:
		if ((size_t)snprintf(buf, bufsz, "info-get %s",
		    key) >= bufsz) {
			DPRINTF("%s: read command too long", DEVNAME(sc));
			error = EINVAL;
			goto done;
		}
		break;
	default:
		error = EOPNOTSUPP;
		goto done;
	}

	if (vm_rpc_send_rpci_tx(sc, "%s", buf) != 0) {
		DPRINTF("%s: error sending command: %s\n", DEVNAME(sc), buf);
		sc->sc_rpc_error = 1;
		error = EIO;
		goto done;
	}

	if (vm_rpci_response_successful(sc) == 0) {
		DPRINTF("%s: host rejected command: %s\n", DEVNAME(sc), buf);
		error = EINVAL;
		goto done;
	}

	/* skip response that was tested in vm_rpci_response_successful() */
	ptr = sc->sc_rpc_buf + 2;

	/* might truncate; copy anyway but return error */
	if (strlcpy(value, ptr, valuelen) >= valuelen)
		error = ENOMEM;

done:
	free(buf, M_TEMP, bufsz);
	return (error);
}

void
vmt_resume(void)
{
	struct vm_backdoor frame;
	extern void rdrand(void *);

	bzero(&frame, sizeof(frame));
	frame.eax.word = VM_MAGIC;
	frame.ecx.part.low = VM_CMD_GET_TIME_FULL;
	frame.edx.part.low = VM_PORT_CMD;
	vm_cmd(&frame);

	rdrand(NULL);
	enqueue_randomness(frame.eax.word);
	enqueue_randomness(frame.esi.word);
	enqueue_randomness(frame.edx.word);
	enqueue_randomness(frame.ebx.word);
	resume_randomness(NULL, 0);
}

int
vmt_activate(struct device *self, int act)
{
	int rv = 0;

	switch (act) {
	case DVACT_POWERDOWN:
		vmt_shutdown(self);
		break;
	case DVACT_RESUME:
		vmt_resume();
		break;
	}
	return (rv);
}


void
vmt_update_guest_uptime(struct vmt_softc *sc)
{
	/* host wants uptime in hundredths of a second */
	if (vm_rpc_send_rpci_tx(sc, "SetGuestInfo %d %lld00",
	    VM_GUEST_INFO_UPTIME, (long long)getuptime()) != 0) {
		DPRINTF("%s: unable to set guest uptime", DEVNAME(sc));
		sc->sc_rpc_error = 1;
	}
}

void
vmt_clear_guest_info(struct vmt_softc *sc)
{
	if (sc->sc_nic_info_size != 0) {
		free(sc->sc_nic_info, M_DEVBUF, sc->sc_nic_info_size);
		sc->sc_nic_info = NULL;
		sc->sc_nic_info_size = 0;
	}
	sc->sc_hostname[0] = '\0';
	sc->sc_set_guest_os = 0;
}

void
vmt_update_guest_info(struct vmt_softc *sc)
{
	if (strncmp(sc->sc_hostname, hostname, sizeof(sc->sc_hostname)) != 0) {
		strlcpy(sc->sc_hostname, hostname, sizeof(sc->sc_hostname));

		if (vm_rpc_send_rpci_tx(sc, "SetGuestInfo %d %s",
		    VM_GUEST_INFO_DNS_NAME, sc->sc_hostname) != 0) {
			DPRINTF("%s: unable to set hostname", DEVNAME(sc));
			sc->sc_rpc_error = 1;
		}
	}

	if (sc->sc_set_guest_os == 0) {
		if (vm_rpc_send_rpci_tx(sc, "SetGuestInfo %d %s %s %s",
		    VM_GUEST_INFO_OS_NAME_FULL,
		    ostype, osrelease, osversion) != 0) {
			DPRINTF("%s: unable to set full guest OS", DEVNAME(sc));
			sc->sc_rpc_error = 1;
		}

		/*
		 * Host doesn't like it if we send an OS name it doesn't
		 * recognise, so use the closest match, which happens
		 * to be FreeBSD.
		 */

		if (vm_rpc_send_rpci_tx(sc, "SetGuestInfo %d %s",
		    VM_GUEST_INFO_OS_NAME, "FreeBSD") != 0) {
			DPRINTF("%s: unable to set guest OS", DEVNAME(sc));
			sc->sc_rpc_error = 1;
		}

		sc->sc_set_guest_os = 1;
	}

	task_add(systq, &sc->sc_nicinfo_task);
}

void
vmt_tick_hook(struct device *self)
{
	struct vmt_softc *sc = (struct vmt_softc *)self;

	timeout_set(&sc->sc_tick, vmt_tick, sc);
	vmt_tick(sc);
}

void
vmt_tick(void *xarg)
{
	struct vmt_softc *sc = xarg;
	struct vm_backdoor frame;
	struct timeval *guest = &sc->sc_sensor.tv;
	struct timeval host, diff;

	microtime(guest);

	bzero(&frame, sizeof(frame));
	frame.eax.word = VM_MAGIC;
	frame.ecx.part.low = VM_CMD_GET_TIME_FULL;
	frame.edx.part.low = VM_PORT_CMD;
	vm_cmd(&frame);

	if (frame.eax.word != 0xffffffff) {
		host.tv_sec = ((uint64_t)frame.esi.word << 32) | frame.edx.word;
		host.tv_usec = frame.ebx.word;

		timersub(guest, &host, &diff);

		sc->sc_sensor.value = (u_int64_t)diff.tv_sec * 1000000000LL +
		    (u_int64_t)diff.tv_usec * 1000LL;
		sc->sc_sensor.status = SENSOR_S_OK;
	} else {
		sc->sc_sensor.status = SENSOR_S_UNKNOWN;
	}

	vmt_update_guest_info(sc);
	vmt_update_guest_uptime(sc);

	timeout_add_sec(&sc->sc_tick, 15);
}

void
vmt_tclo_state_change_success(struct vmt_softc *sc, int success, char state)
{
	if (vm_rpc_send_rpci_tx(sc, "tools.os.statechange.status %d %d",
	    success, state) != 0) {
		DPRINTF("%s: unable to send state change result\n",
		    DEVNAME(sc));
		sc->sc_rpc_error = 1;
	}
}

void
vmt_do_shutdown(struct vmt_softc *sc)
{
	vmt_tclo_state_change_success(sc, 1, VM_STATE_CHANGE_HALT);
	vm_rpc_send_str(&sc->sc_tclo_rpc, VM_RPC_REPLY_OK);
	pvbus_shutdown(&sc->sc_dev);
}

void
vmt_do_reboot(struct vmt_softc *sc)
{
	vmt_tclo_state_change_success(sc, 1, VM_STATE_CHANGE_REBOOT);
	vm_rpc_send_str(&sc->sc_tclo_rpc, VM_RPC_REPLY_OK);
	pvbus_reboot(&sc->sc_dev);
}

void
vmt_shutdown(void *arg)
{
	struct vmt_softc *sc = arg;

	if (vm_rpc_send_rpci_tx(sc,
	    "tools.capability.hgfs_server toolbox 0") != 0) {
		DPRINTF("%s: failed to disable hgfs server capability\n",
		    DEVNAME(sc));
	}

	if (vm_rpc_send(&sc->sc_tclo_rpc, NULL, 0) != 0) {
		DPRINTF("%s: failed to send shutdown ping\n", DEVNAME(sc));
	}

	vm_rpc_close(&sc->sc_tclo_rpc);
}

void
vmt_tclo_reset(struct vmt_softc *sc)
{
	if (sc->sc_rpc_error != 0) {
		DPRINTF("%s: resetting rpc\n", DEVNAME(sc));
		vm_rpc_close(&sc->sc_tclo_rpc);

		/* reopen and send the reset reply next time around */
		sc->sc_rpc_error = 1;
		return;
	}

	if (vm_rpc_send_str(&sc->sc_tclo_rpc, VM_RPC_RESET_REPLY) != 0) {
		DPRINTF("%s: failed to send reset reply\n", DEVNAME(sc));
		sc->sc_rpc_error = 1;
	}
}

void
vmt_tclo_ping(struct vmt_softc *sc)
{
	if (vm_rpc_send_str(&sc->sc_tclo_rpc, VM_RPC_REPLY_OK) != 0) {
		DPRINTF("%s: error sending ping response\n", DEVNAME(sc));
		sc->sc_rpc_error = 1;
	}
}

void
vmt_tclo_halt(struct vmt_softc *sc)
{
	vmt_do_shutdown(sc);
}

void
vmt_tclo_reboot(struct vmt_softc *sc)
{
	vmt_do_reboot(sc);
}

void
vmt_tclo_poweron(struct vmt_softc *sc)
{
	vmt_tclo_state_change_success(sc, 1, VM_STATE_CHANGE_POWERON);

	if (vm_rpc_send_str(&sc->sc_tclo_rpc, VM_RPC_REPLY_OK) != 0) {
		DPRINTF("%s: error sending poweron response\n", DEVNAME(sc));
		sc->sc_rpc_error = 1;
	}
}

void
vmt_tclo_suspend(struct vmt_softc *sc)
{
	log(LOG_KERN | LOG_NOTICE,
	    "VMware guest entering suspended state\n");

	suspend_randomness();

	vmt_tclo_state_change_success(sc, 1, VM_STATE_CHANGE_SUSPEND);
	if (vm_rpc_send_str(&sc->sc_tclo_rpc, VM_RPC_REPLY_OK) != 0) {
		DPRINTF("%s: error sending suspend response\n", DEVNAME(sc));
		sc->sc_rpc_error = 1;
	}
}

void
vmt_tclo_resume(struct vmt_softc *sc)
{
	log(LOG_KERN | LOG_NOTICE,
	    "VMware guest resuming from suspended state\n");

	/* force guest info update */
	vmt_clear_guest_info(sc);
	vmt_update_guest_info(sc);
	vmt_resume();

	vmt_tclo_state_change_success(sc, 1, VM_STATE_CHANGE_RESUME);
	if (vm_rpc_send_str(&sc->sc_tclo_rpc, VM_RPC_REPLY_OK) != 0) {
		DPRINTF("%s: error sending resume response\n", DEVNAME(sc));
		sc->sc_rpc_error = 1;
	}
}

void
vmt_tclo_capreg(struct vmt_softc *sc)
{
	/* don't know if this is important at all */
	if (vm_rpc_send_rpci_tx(sc,
	    "vmx.capability.unified_loop toolbox") != 0) {
		DPRINTF("%s: unable to set unified loop\n", DEVNAME(sc));
		sc->sc_rpc_error = 1;
	}

	if (vm_rpci_response_successful(sc) == 0) {
		DPRINTF("%s: host rejected unified loop setting\n",
		    DEVNAME(sc));
	}

	/* the trailing space is apparently important here */
	if (vm_rpc_send_rpci_tx(sc,
	    "tools.capability.statechange ") != 0) {
		DPRINTF("%s: unable to send statechange capability\n",
		    DEVNAME(sc));
		sc->sc_rpc_error = 1;
	}

	if (vm_rpci_response_successful(sc) == 0) {
		DPRINTF("%s: host rejected statechange capability\n",
		    DEVNAME(sc));
	}

	if (vm_rpc_send_rpci_tx(sc, "tools.set.version %u",
	    VM_VERSION_UNMANAGED) != 0) {
		DPRINTF("%s: unable to set tools version\n",
		    DEVNAME(sc));
		sc->sc_rpc_error = 1;
	}

	vmt_clear_guest_info(sc);
	vmt_update_guest_uptime(sc);

	if (vm_rpc_send_str(&sc->sc_tclo_rpc, VM_RPC_REPLY_OK) != 0) {
		DPRINTF("%s: error sending capabilities_register"
		    " response\n", DEVNAME(sc));
		sc->sc_rpc_error = 1;
	}
}

void
vmt_tclo_broadcastip(struct vmt_softc *sc)
{
	struct ifnet *iface;
	struct sockaddr_in *guest_ip;

	/* find first available ipv4 address */
	guest_ip = NULL;
	TAILQ_FOREACH(iface, &ifnet, if_list) {
		struct ifaddr *iface_addr;

		/* skip loopback */
		if (strncmp(iface->if_xname, "lo", 2) == 0 &&
		    iface->if_xname[2] >= '0' &&
		    iface->if_xname[2] <= '9') {
			continue;
		}

		TAILQ_FOREACH(iface_addr, &iface->if_addrlist,
		    ifa_list) {
			if (iface_addr->ifa_addr->sa_family != AF_INET)
				continue;

			guest_ip = satosin(iface_addr->ifa_addr);
			break;
		}
	}

	if (guest_ip != NULL) {
		char ip[INET_ADDRSTRLEN];

		inet_ntop(AF_INET, &guest_ip->sin_addr, ip, sizeof(ip));
		if (vm_rpc_send_rpci_tx(sc, "info-set guestinfo.ip %s",
		    ip) != 0) {
			DPRINTF("%s: unable to send guest IP address\n",
			    DEVNAME(sc));
			sc->sc_rpc_error = 1;
		}

		if (vm_rpc_send_str(&sc->sc_tclo_rpc,
		    VM_RPC_REPLY_OK) != 0) {
			DPRINTF("%s: error sending broadcastIP"
			    " response\n", DEVNAME(sc));
			sc->sc_rpc_error = 1;
		}
	} else {
		if (vm_rpc_send_str(&sc->sc_tclo_rpc,
		    VM_RPC_REPLY_ERROR_IP_ADDR) != 0) {
			DPRINTF("%s: error sending broadcastIP"
			    " error response\n", DEVNAME(sc));
			sc->sc_rpc_error = 1;
		}
	}
}

void
vmt_set_backup_status(struct vmt_softc *sc, const char *state, int code,
    const char *desc)
{
	if (vm_rpc_send_rpci_tx(sc, "vmbackup.eventSet %s %d %s",
	    state, code, desc) != 0) {
		DPRINTF("%s: setting backup status failed\n", DEVNAME(sc));
	}
}

void
vmt_quiesce_task(void *data)
{
	struct vmt_softc *sc = data;
	int err;

	DPRINTF("%s: quiescing filesystems for backup\n", DEVNAME(sc));
	err = vfs_stall(curproc, 1);
	if (err != 0) {
		printf("%s: unable to quiesce filesystems\n", DEVNAME(sc));
		vfs_stall(curproc, 0);

		vmt_set_backup_status(sc, "req.aborted", VM_BACKUP_SYNC_ERROR,
		    "vfs_stall failed");
		vmt_set_backup_status(sc, "req.done", VM_BACKUP_SUCCESS, "");
		sc->sc_quiesce = 0;
		return;
	}

	DPRINTF("%s: filesystems quiesced\n", DEVNAME(sc));
	vmt_set_backup_status(sc, "prov.snapshotCommit", VM_BACKUP_SUCCESS, "");
}

void
vmt_quiesce_done_task(void *data)
{
	struct vmt_softc *sc = data;

	vfs_stall(curproc, 0);

	if (sc->sc_quiesce == -1)
		vmt_set_backup_status(sc, "req.aborted", VM_BACKUP_REMOTE_ABORT,
		    "");

	vmt_set_backup_status(sc, "req.done", VM_BACKUP_SUCCESS, "");
	sc->sc_quiesce = 0;
}

void
vmt_tclo_abortbackup(struct vmt_softc *sc)
{
	const char *reply = VM_RPC_REPLY_OK;

	if (sc->sc_quiesce > 0) {
		DPRINTF("%s: aborting backup\n", DEVNAME(sc));
		sc->sc_quiesce = -1;
		task_set(&sc->sc_quiesce_task, vmt_quiesce_done_task, sc);
		task_add(systq, &sc->sc_quiesce_task);
	} else {
		DPRINTF("%s: can't abort, no backup in progress\n",
		    DEVNAME(sc));
		reply = VM_RPC_REPLY_ERROR;
	}

	if (vm_rpc_send_str(&sc->sc_tclo_rpc, reply) != 0) {
		DPRINTF("%s: error sending vmbackup.abort reply\n",
		    DEVNAME(sc));
		sc->sc_rpc_error = 1;
	}
}

void
vmt_tclo_startbackup(struct vmt_softc *sc)
{
	const char *reply = VM_RPC_REPLY_OK;

	if (sc->sc_quiesce == 0) {
		DPRINTF("%s: starting quiesce\n", DEVNAME(sc));
		vmt_set_backup_status(sc, "reset", VM_BACKUP_SUCCESS, "");

		task_set(&sc->sc_quiesce_task, vmt_quiesce_task, sc);
		task_add(systq, &sc->sc_quiesce_task);
		sc->sc_quiesce = 1;
	} else {
		DPRINTF("%s: can't start backup, already in progress\n",
		    DEVNAME(sc));
		reply = VM_RPC_REPLY_ERROR;
	}

	if (vm_rpc_send_str(&sc->sc_tclo_rpc, reply) != 0) {
		DPRINTF("%s: error sending vmbackup.start reply\n",
		    DEVNAME(sc));
		sc->sc_rpc_error = 1;
	}
}

void
vmt_tclo_backupdone(struct vmt_softc *sc)
{
	const char *reply = VM_RPC_REPLY_OK;
	if (sc->sc_quiesce > 0) {
		DPRINTF("%s: backup complete\n", DEVNAME(sc));
		task_set(&sc->sc_quiesce_task, vmt_quiesce_done_task, sc);
		task_add(systq, &sc->sc_quiesce_task);
	} else {
		DPRINTF("%s: got backup complete, but not doing a backup\n",
		    DEVNAME(sc));
		reply = VM_RPC_REPLY_ERROR;
	}

	if (vm_rpc_send_str(&sc->sc_tclo_rpc, reply) != 0) {
		DPRINTF("%s: error sending vmbackup.snapshotDone reply\n",
		    DEVNAME(sc));
		sc->sc_rpc_error = 1;
	}
}

int
vmt_tclo_process(struct vmt_softc *sc, const char *name)
{
	int i;

	/* Search for rpc command and call handler */
	for (i = 0; vmt_tclo_rpc[i].name != NULL; i++) {
		if (strcmp(vmt_tclo_rpc[i].name, sc->sc_rpc_buf) == 0) {
			vmt_tclo_rpc[i].cb(sc);
			return (0);
		}
	}

	DPRINTF("%s: unknown command: \"%s\"\n", DEVNAME(sc), name);

	return (-1);
}

void
vmt_tclo_tick(void *xarg)
{
	struct vmt_softc *sc = xarg;
	u_int32_t rlen;
	u_int16_t ack;
	int delay;

	/* By default, poll every second for new messages */
	delay = 1;

	if (sc->sc_quiesce > 0) {
		/* abort quiesce if it's taking too long */
		if (sc->sc_quiesce++ == VM_BACKUP_TIMEOUT) {
			printf("%s: aborting quiesce\n", DEVNAME(sc));
			sc->sc_quiesce = -1;
			task_set(&sc->sc_quiesce_task, vmt_quiesce_done_task,
			    sc);
			task_add(systq, &sc->sc_quiesce_task);
		} else
			vmt_set_backup_status(sc, "req.keepAlive",
			    VM_BACKUP_SUCCESS, "");
	}

	/* reopen tclo channel if it's currently closed */
	if (sc->sc_tclo_rpc.channel == 0 &&
	    sc->sc_tclo_rpc.cookie1 == 0 &&
	    sc->sc_tclo_rpc.cookie2 == 0) {
		if (vm_rpc_open(&sc->sc_tclo_rpc, VM_RPC_OPEN_TCLO) != 0) {
			DPRINTF("%s: unable to reopen TCLO channel\n",
			    DEVNAME(sc));
			delay = 15;
			goto out;
		}

		if (vm_rpc_send_str(&sc->sc_tclo_rpc,
		    VM_RPC_RESET_REPLY) != 0) {
			DPRINTF("%s: failed to send reset reply\n",
			    DEVNAME(sc));
			sc->sc_rpc_error = 1;
			goto out;
		} else {
			sc->sc_rpc_error = 0;
		}
	}

	if (sc->sc_tclo_ping) {
		if (vm_rpc_send(&sc->sc_tclo_rpc, NULL, 0) != 0) {
			DPRINTF("%s: failed to send TCLO outgoing ping\n",
			    DEVNAME(sc));
			sc->sc_rpc_error = 1;
			goto out;
		}
	}

	if (vm_rpc_get_length(&sc->sc_tclo_rpc, &rlen, &ack) != 0) {
		DPRINTF("%s: failed to get length of incoming TCLO data\n",
		    DEVNAME(sc));
		sc->sc_rpc_error = 1;
		goto out;
	}

	if (rlen == 0) {
		sc->sc_tclo_ping = 1;
		goto out;
	}

	if (rlen >= VMT_RPC_BUFLEN) {
		rlen = VMT_RPC_BUFLEN - 1;
	}
	if (vm_rpc_get_data(&sc->sc_tclo_rpc, sc->sc_rpc_buf, rlen, ack) != 0) {
		DPRINTF("%s: failed to get incoming TCLO data\n", DEVNAME(sc));
		sc->sc_rpc_error = 1;
		goto out;
	}
	sc->sc_tclo_ping = 0;

	/* The VM host can queue multiple messages; continue without delay */
	delay = 0;

	if (vmt_tclo_process(sc, sc->sc_rpc_buf) != 0) {
		if (vm_rpc_send_str(&sc->sc_tclo_rpc,
		    VM_RPC_REPLY_ERROR) != 0) {
			DPRINTF("%s: error sending unknown command reply\n",
			    DEVNAME(sc));
			sc->sc_rpc_error = 1;
		}
	}

	if (sc->sc_rpc_error == 1) {
		/* On error, give time to recover and wait a second */
		delay = 1;
	}

out:
	timeout_add_sec(&sc->sc_tclo_tick, delay);
}

size_t
vmt_xdr_ifaddr(struct ifaddr *ifa, char *data)
{
	struct sockaddr_in *sin;
	struct vm_nicinfo_addr_v4 v4;
#ifdef INET6
	struct sockaddr_in6 *sin6;
	struct vm_nicinfo_addr_v6 v6;
#endif

	/* skip loopback addresses and anything that isn't ipv4/v6 */
	switch (ifa->ifa_addr->sa_family) {
	case AF_INET:
		sin = satosin(ifa->ifa_addr);
		if ((ntohl(sin->sin_addr.s_addr) >>
		    IN_CLASSA_NSHIFT) != IN_LOOPBACKNET) {
			if (data != NULL) {
				memset(&v4, 0, sizeof(v4));
				htobem32(&v4.v4_addr_type,
				    VM_NICINFO_ADDR_IPV4);
				htobem32(&v4.v4_addr_len,
				    sizeof(struct in_addr));
				memcpy(&v4.v4_addr, &sin->sin_addr.s_addr,
				    sizeof(struct in_addr));
				htobem32(&v4.v4_prefix_len,
				    rtable_satoplen(AF_INET, ifa->ifa_netmask));
				memcpy(data, &v4, sizeof(v4));
			}
			return (sizeof (v4));
		}
		break;

#ifdef INET6
	case AF_INET6:
		sin6 = satosin6(ifa->ifa_addr);
		if (!IN6_IS_ADDR_LOOPBACK(&sin6->sin6_addr) &&
		    !IN6_IS_SCOPE_EMBED(&sin6->sin6_addr)) {
			if (data != NULL) {
				memset(&v6, 0, sizeof(v6));
				htobem32(&v6.v6_addr_type,
				    VM_NICINFO_ADDR_IPV6);
				htobem32(&v6.v6_addr_len,
				    sizeof(sin6->sin6_addr));
				memcpy(&v6.v6_addr, &sin6->sin6_addr,
				    sizeof(sin6->sin6_addr));
				htobem32(&v6.v6_prefix_len,
				    rtable_satoplen(AF_INET6,
				    ifa->ifa_netmask));
				memcpy(data, &v6, sizeof(v6));
			}
			return (sizeof (v6));
		}
		break;
#endif

	default:
		break;
	}

	return (0);
}

size_t
vmt_xdr_nic_entry(struct ifnet *iface, char *data)
{
	struct ifaddr *iface_addr;
	struct sockaddr_dl *sdl;
	struct vm_nicinfo_nic nic;
	struct vm_nicinfo_nic_nomac nnic;
	char *nicdata;
	const char *mac;
	size_t addrsize, total;
	int addrs;

	total = 0;
	addrs = 0;

	/* work out if we have a mac address */
	sdl = iface->if_sadl;
	if (sdl != NULL && sdl->sdl_alen &&
	    (sdl->sdl_type == IFT_ETHER || sdl->sdl_type == IFT_CARP))
		mac = ether_sprintf(sdl->sdl_data + sdl->sdl_nlen);
	else
		mac = NULL;

	if (data != NULL) {
		nicdata = data;
		if (mac != NULL)
			data += sizeof(nic);
		else
			data += sizeof(nnic);
	}

	TAILQ_FOREACH(iface_addr, &iface->if_addrlist, ifa_list) {
		addrsize = vmt_xdr_ifaddr(iface_addr, data);
		if (addrsize == 0)
			continue;

		if (data != NULL)
			data += addrsize;
		total += addrsize;
		addrs++;
		if (addrs == VM_NICINFO_MAX_ADDRS)
			break;
	}

	if (addrs == 0)
		return (0);

	if (data != NULL) {
		/* fill in mac address, if any */
		if (mac != NULL) {
			memset(&nic, 0, sizeof(nic));
			htobem32(&nic.ni_mac_len, strlen(mac));
			strncpy(nic.ni_mac, mac, VM_NICINFO_MAC_LEN);
			htobem32(&nic.ni_num_addrs, addrs);
			memcpy(nicdata, &nic, sizeof(nic));
		} else {
			nnic.nn_mac_len = 0;
			htobem32(&nnic.nn_num_addrs, addrs);
			memcpy(nicdata, &nnic, sizeof(nnic));
		}

		/* we don't actually set anything in vm_nicinfo_nic_post */
	}

	if (mac != NULL)
		total += sizeof(nic);
	else
		total += sizeof(nnic);
	total += sizeof(struct vm_nicinfo_nic_post);
	return (total);
}

size_t
vmt_xdr_nic_info(char *data)
{
	struct ifnet *iface;
	struct vm_nicinfo_nic_list nl;
	size_t total, nictotal;
	char *listdata = NULL;
	int nics;

	NET_ASSERT_LOCKED();

	total = sizeof(nl);
	if (data != NULL) {
		listdata = data;
		data += sizeof(nl);
	}

	nics = 0;
	TAILQ_FOREACH(iface, &ifnet, if_list) {
		nictotal = vmt_xdr_nic_entry(iface, data);
		if (nictotal == 0)
			continue;

		if (data != NULL)
			data += nictotal;

		total += nictotal;
		nics++;
		if (nics == VM_NICINFO_MAX_NICS)
			break;
	}

	if (listdata != NULL) {
		memset(&nl, 0, sizeof(nl));
		htobem32(&nl.nl_version, VM_NICINFO_VERSION);
		htobem32(&nl.nl_nic_list, 1);
		htobem32(&nl.nl_num_nics, nics);
		memcpy(listdata, &nl, sizeof(nl));
	}

	/* we don't actually set anything in vm_nicinfo_nic_list_post */
	total += sizeof(struct vm_nicinfo_nic_list_post);

	return (total);
}

void
vmt_nicinfo_task(void *data)
{
	struct vmt_softc *sc = data;
	size_t nic_info_size;
	char *nic_info;

	NET_LOCK();

	nic_info_size = vmt_xdr_nic_info(NULL) + sizeof(VM_NICINFO_CMD) - 1;
	nic_info = malloc(nic_info_size, M_DEVBUF, M_WAITOK | M_ZERO);

	strncpy(nic_info, VM_NICINFO_CMD, nic_info_size);
	vmt_xdr_nic_info(nic_info + sizeof(VM_NICINFO_CMD) - 1);

	NET_UNLOCK();

	if (nic_info_size != sc->sc_nic_info_size ||
	    (memcmp(nic_info, sc->sc_nic_info, nic_info_size) != 0)) {
		if (vm_rpc_send_rpci_tx_buf(sc, nic_info,
		    nic_info_size) != 0) {
			DPRINTF("%s: unable to send nic info",
			    DEVNAME(sc));
			sc->sc_rpc_error = 1;
		}

		free(sc->sc_nic_info, M_DEVBUF, sc->sc_nic_info_size);
		sc->sc_nic_info = nic_info;
		sc->sc_nic_info_size = nic_info_size;
	} else {
		free(nic_info, M_DEVBUF, nic_info_size);
	}
}

#define BACKDOOR_OP_I386(op, frame)		\
	__asm__ volatile (			\
		"pushal;"			\
		"pushl %%eax;"			\
		"movl 0x18(%%eax), %%ebp;"	\
		"movl 0x14(%%eax), %%edi;"	\
		"movl 0x10(%%eax), %%esi;"	\
		"movl 0x0c(%%eax), %%edx;"	\
		"movl 0x08(%%eax), %%ecx;"	\
		"movl 0x04(%%eax), %%ebx;"	\
		"movl 0x00(%%eax), %%eax;"	\
		op				\
		"xchgl %%eax, 0x00(%%esp);"	\
		"movl %%ebp, 0x18(%%eax);"	\
		"movl %%edi, 0x14(%%eax);"	\
		"movl %%esi, 0x10(%%eax);"	\
		"movl %%edx, 0x0c(%%eax);"	\
		"movl %%ecx, 0x08(%%eax);"	\
		"movl %%ebx, 0x04(%%eax);"	\
		"popl 0x00(%%eax);"		\
		"popal;"			\
		::"a"(frame)			\
	)

#define BACKDOOR_OP_AMD64(op, frame)			\
	__asm__ volatile (				\
		"pushq %%rbp; \n\t"			\
		"pushq %%rax; \n\t"			\
		"movq 0x30(%%rax), %%rbp; \n\t"		\
		"movq 0x28(%%rax), %%rdi; \n\t"		\
		"movq 0x20(%%rax), %%rsi; \n\t"		\
		"movq 0x18(%%rax), %%rdx; \n\t"		\
		"movq 0x10(%%rax), %%rcx; \n\t"		\
		"movq 0x08(%%rax), %%rbx; \n\t"		\
		"movq 0x00(%%rax), %%rax; \n\t"		\
		op "\n\t"				\
		"xchgq %%rax, 0x00(%%rsp); \n\t"	\
		"movq %%rbp, 0x30(%%rax); \n\t"		\
		"movq %%rdi, 0x28(%%rax); \n\t"		\
		"movq %%rsi, 0x20(%%rax); \n\t"		\
		"movq %%rdx, 0x18(%%rax); \n\t"		\
		"movq %%rcx, 0x10(%%rax); \n\t"		\
		"movq %%rbx, 0x08(%%rax); \n\t"		\
		"popq 0x00(%%rax); \n\t"		\
		"popq %%rbp; \n\t"			\
		: /* No outputs. */ : "a" (frame)	\
	/* No pushal on amd64 so warn gcc about the clobbered registers. */ \
		: "rbx", "rcx", "rdx", "rdi", "rsi", "cc", "memory"	\
	)


#ifdef __i386__
#define BACKDOOR_OP(op, frame) BACKDOOR_OP_I386(op, frame)
#else
#define BACKDOOR_OP(op, frame) BACKDOOR_OP_AMD64(op, frame)
#endif

void
vm_cmd(struct vm_backdoor *frame)
{
	BACKDOOR_OP("inl %%dx, %%eax;", frame);
}

void
vm_ins(struct vm_backdoor *frame)
{
	BACKDOOR_OP("cld;\n\trep insb;", frame);
}

void
vm_outs(struct vm_backdoor *frame)
{
	BACKDOOR_OP("cld;\n\trep outsb;", frame);
}

int
vm_rpc_open(struct vm_rpc *rpc, uint32_t proto)
{
	struct vm_backdoor frame;

	bzero(&frame, sizeof(frame));
	frame.eax.word = VM_MAGIC;
	frame.ebx.word = proto | VM_RPC_FLAG_COOKIE;
	frame.ecx.part.low = VM_CMD_RPC;
	frame.ecx.part.high = VM_RPC_OPEN;
	frame.edx.part.low = VM_PORT_CMD;
	frame.edx.part.high = 0;

	vm_cmd(&frame);

	if (frame.ecx.part.high != 1 || frame.edx.part.low != 0) {
		/* open-vm-tools retries without VM_RPC_FLAG_COOKIE here.. */
		DPRINTF("vmware: open failed, eax=%08x, ecx=%08x, edx=%08x\n",
		    frame.eax.word, frame.ecx.word, frame.edx.word);
		return EIO;
	}

	rpc->channel = frame.edx.part.high;
	rpc->cookie1 = frame.esi.word;
	rpc->cookie2 = frame.edi.word;

	return 0;
}

int
vm_rpc_close(struct vm_rpc *rpc)
{
	struct vm_backdoor frame;

	bzero(&frame, sizeof(frame));
	frame.eax.word = VM_MAGIC;
	frame.ebx.word = 0;
	frame.ecx.part.low = VM_CMD_RPC;
	frame.ecx.part.high = VM_RPC_CLOSE;
	frame.edx.part.low = VM_PORT_CMD;
	frame.edx.part.high = rpc->channel;
	frame.edi.word = rpc->cookie2;
	frame.esi.word = rpc->cookie1;

	vm_cmd(&frame);

	if (frame.ecx.part.high == 0 || frame.ecx.part.low != 0) {
		DPRINTF("vmware: close failed, eax=%08x, ecx=%08x\n",
		    frame.eax.word, frame.ecx.word);
		return EIO;
	}

	rpc->channel = 0;
	rpc->cookie1 = 0;
	rpc->cookie2 = 0;

	return 0;
}

int
vm_rpc_send(const struct vm_rpc *rpc, const uint8_t *buf, uint32_t length)
{
	struct vm_backdoor frame;

	/* Send the length of the command. */
	bzero(&frame, sizeof(frame));
	frame.eax.word = VM_MAGIC;
	frame.ebx.word = length;
	frame.ecx.part.low = VM_CMD_RPC;
	frame.ecx.part.high = VM_RPC_SET_LENGTH;
	frame.edx.part.low = VM_PORT_CMD;
	frame.edx.part.high = rpc->channel;
	frame.esi.word = rpc->cookie1;
	frame.edi.word = rpc->cookie2;

	vm_cmd(&frame);

	if ((frame.ecx.part.high & VM_RPC_REPLY_SUCCESS) == 0) {
		DPRINTF("vmware: sending length failed, eax=%08x, ecx=%08x\n",
		    frame.eax.word, frame.ecx.word);
		return EIO;
	}

	if (length == 0)
		return 0; /* Only need to poke once if command is null. */

	/* Send the command using enhanced RPC. */
	bzero(&frame, sizeof(frame));
	frame.eax.word = VM_MAGIC;
	frame.ebx.word = VM_RPC_ENH_DATA;
	frame.ecx.word = length;
	frame.edx.part.low = VM_PORT_RPC;
	frame.edx.part.high = rpc->channel;
	frame.ebp.word = rpc->cookie1;
	frame.edi.word = rpc->cookie2;
#ifdef __amd64__
	frame.esi.quad = (uint64_t)buf;
#else
	frame.esi.word = (uint32_t)buf;
#endif

	vm_outs(&frame);

	if (frame.ebx.word != VM_RPC_ENH_DATA) {
		/* open-vm-tools retries on VM_RPC_REPLY_CHECKPOINT */
		DPRINTF("vmware: send failed, ebx=%08x\n", frame.ebx.word);
		return EIO;
	}

	return 0;
}

int
vm_rpc_send_str(const struct vm_rpc *rpc, const uint8_t *str)
{
	return vm_rpc_send(rpc, str, strlen(str));
}

int
vm_rpc_get_data(const struct vm_rpc *rpc, char *data, uint32_t length,
    uint16_t dataid)
{
	struct vm_backdoor frame;

	/* Get data using enhanced RPC. */
	bzero(&frame, sizeof(frame));
	frame.eax.word = VM_MAGIC;
	frame.ebx.word = VM_RPC_ENH_DATA;
	frame.ecx.word = length;
	frame.edx.part.low = VM_PORT_RPC;
	frame.edx.part.high = rpc->channel;
	frame.esi.word = rpc->cookie1;
#ifdef __amd64__
	frame.edi.quad = (uint64_t)data;
#else
	frame.edi.word = (uint32_t)data;
#endif
	frame.ebp.word = rpc->cookie2;

	vm_ins(&frame);

	/* NUL-terminate the data */
	data[length] = '\0';

	if (frame.ebx.word != VM_RPC_ENH_DATA) {
		DPRINTF("vmware: get data failed, ebx=%08x\n",
		    frame.ebx.word);
		return EIO;
	}

	/* Acknowledge data received. */
	bzero(&frame, sizeof(frame));
	frame.eax.word = VM_MAGIC;
	frame.ebx.word = dataid;
	frame.ecx.part.low = VM_CMD_RPC;
	frame.ecx.part.high = VM_RPC_GET_END;
	frame.edx.part.low = VM_PORT_CMD;
	frame.edx.part.high = rpc->channel;
	frame.esi.word = rpc->cookie1;
	frame.edi.word = rpc->cookie2;

	vm_cmd(&frame);

	if (frame.ecx.part.high == 0) {
		DPRINTF("vmware: ack data failed, eax=%08x, ecx=%08x\n",
		    frame.eax.word, frame.ecx.word);
		return EIO;
	}

	return 0;
}

int
vm_rpc_get_length(const struct vm_rpc *rpc, uint32_t *length, uint16_t *dataid)
{
	struct vm_backdoor frame;

	bzero(&frame, sizeof(frame));
	frame.eax.word = VM_MAGIC;
	frame.ebx.word = 0;
	frame.ecx.part.low = VM_CMD_RPC;
	frame.ecx.part.high = VM_RPC_GET_LENGTH;
	frame.edx.part.low = VM_PORT_CMD;
	frame.edx.part.high = rpc->channel;
	frame.esi.word = rpc->cookie1;
	frame.edi.word = rpc->cookie2;

	vm_cmd(&frame);

	if ((frame.ecx.part.high & VM_RPC_REPLY_SUCCESS) == 0) {
		DPRINTF("vmware: get length failed, eax=%08x, ecx=%08x\n",
		    frame.eax.word, frame.ecx.word);
		return EIO;
	}
	if ((frame.ecx.part.high & VM_RPC_REPLY_DORECV) == 0) {
		*length = 0;
		*dataid = 0;
	} else {
		*length = frame.ebx.word;
		*dataid = frame.edx.part.high;
	}

	return 0;
}

int
vm_rpci_response_successful(struct vmt_softc *sc)
{
	return (sc->sc_rpc_buf[0] == '1' && sc->sc_rpc_buf[1] == ' ');
}

int
vm_rpc_send_rpci_tx_buf(struct vmt_softc *sc, const uint8_t *buf,
    uint32_t length)
{
	struct vm_rpc rpci;
	u_int32_t rlen;
	u_int16_t ack;
	int result = 0;

	if (vm_rpc_open(&rpci, VM_RPC_OPEN_RPCI) != 0) {
		DPRINTF("%s: rpci channel open failed\n", DEVNAME(sc));
		return EIO;
	}

	if (vm_rpc_send(&rpci, buf, length) != 0) {
		DPRINTF("%s: unable to send rpci command\n", DEVNAME(sc));
		result = EIO;
		goto out;
	}

	if (vm_rpc_get_length(&rpci, &rlen, &ack) != 0) {
		DPRINTF("%s: failed to get length of rpci response data\n",
		    DEVNAME(sc));
		result = EIO;
		goto out;
	}

	if (rlen > 0) {
		if (rlen >= VMT_RPC_BUFLEN) {
			rlen = VMT_RPC_BUFLEN - 1;
		}

		if (vm_rpc_get_data(&rpci, sc->sc_rpc_buf, rlen, ack) != 0) {
			DPRINTF("%s: failed to get rpci response data\n",
			    DEVNAME(sc));
			result = EIO;
			goto out;
		}
	}

out:
	if (vm_rpc_close(&rpci) != 0) {
		DPRINTF("%s: unable to close rpci channel\n", DEVNAME(sc));
	}

	return result;
}

int
vm_rpc_send_rpci_tx(struct vmt_softc *sc, const char *fmt, ...)
{
	va_list args;
	int len;

	va_start(args, fmt);
	len = vsnprintf(sc->sc_rpc_buf, VMT_RPC_BUFLEN, fmt, args);
	va_end(args);

	if (len >= VMT_RPC_BUFLEN) {
		DPRINTF("%s: rpci command didn't fit in buffer\n", DEVNAME(sc));
		return EIO;
	}

	return vm_rpc_send_rpci_tx_buf(sc, sc->sc_rpc_buf, len);
}

#if 0
	struct vm_backdoor frame;

	bzero(&frame, sizeof(frame));

	frame.eax.word = VM_MAGIC;
	frame.ecx.part.low = VM_CMD_GET_VERSION;
	frame.edx.part.low = VM_PORT_CMD;

	printf("\n");
	printf("eax 0x%08x\n", frame.eax.word);
	printf("ebx 0x%08x\n", frame.ebx.word);
	printf("ecx 0x%08x\n", frame.ecx.word);
	printf("edx 0x%08x\n", frame.edx.word);
	printf("ebp 0x%08x\n", frame.ebp.word);
	printf("edi 0x%08x\n", frame.edi.word);
	printf("esi 0x%08x\n", frame.esi.word);

	vm_cmd(&frame);

	printf("-\n");
	printf("eax 0x%08x\n", frame.eax.word);
	printf("ebx 0x%08x\n", frame.ebx.word);
	printf("ecx 0x%08x\n", frame.ecx.word);
	printf("edx 0x%08x\n", frame.edx.word);
	printf("ebp 0x%08x\n", frame.ebp.word);
	printf("edi 0x%08x\n", frame.edi.word);
	printf("esi 0x%08x\n", frame.esi.word);
#endif
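
/*
 * Illustrative sketch only (never compiled): one complete guest-to-host
 * RPCI round trip built from the helpers above, roughly what
 * vm_rpc_send_rpci_tx_buf() does for each command it sends.  The
 * "log ..." command string is just an example payload.
 */
#if 0
	struct vm_rpc rpci;
	char resp[128];
	uint32_t rlen;
	uint16_t ack;

	if (vm_rpc_open(&rpci, VM_RPC_OPEN_RPCI) == 0) {
		/* send the command, then ask whether the host replied */
		if (vm_rpc_send_str(&rpci, "log vmt: example message") == 0 &&
		    vm_rpc_get_length(&rpci, &rlen, &ack) == 0 && rlen > 0) {
			if (rlen >= sizeof(resp))
				rlen = sizeof(resp) - 1;
			/*
			 * A reply starting with "1 " indicates success,
			 * see vm_rpci_response_successful().
			 */
			vm_rpc_get_data(&rpci, resp, rlen, ack);
		}
		vm_rpc_close(&rpci);
	}
#endif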

/*
 * Notes on tracing backdoor activity in vmware-guestd:
 *
 * - Find the addresses of the inl / rep insb / rep outsb
 *   instructions used to perform backdoor operations.
 *   One way to do this is to disassemble vmware-guestd:
 *
 *   $ objdump -S /emul/freebsd/sbin/vmware-guestd > vmware-guestd.S
 *
 *   and search for '<tab>in ' in the resulting file. The rep insb and
 *   rep outsb code is directly below that.
 *
 * - Run vmware-guestd under gdb, setting up breakpoints as follows:
 *   (the addresses shown here are the ones from VMware-server-1.0.10-203137,
 *   the last version that actually works in FreeBSD emulation on OpenBSD)
 *
 * break *0x805497b   (address of 'in' instruction)
 * commands 1
 * silent
 * echo INOUT\n
 * print/x $ecx
 * print/x $ebx
 * print/x $edx
 * continue
 * end
 * break *0x805497c   (address of instruction after 'in')
 * commands 2
 * silent
 * echo ===\n
 * print/x $ecx
 * print/x $ebx
 * print/x $edx
 * echo \n
 * continue
 * end
 * break *0x80549b7   (address of instruction before 'rep insb')
 * commands 3
 * silent
 * set variable $inaddr = $edi
 * set variable $incount = $ecx
 * continue
 * end
 * break *0x80549ba   (address of instruction after 'rep insb')
 * commands 4
 * silent
 * echo IN\n
 * print $incount
 * x/s $inaddr
 * echo \n
 * continue
 * end
 * break *0x80549fb   (address of instruction before 'rep outsb')
 * commands 5
 * silent
 * echo OUT\n
 * print $ecx
 * x/s $esi
 * echo \n
 * continue
 * end
 *
 * This will produce a log of the backdoor operations, including the
 * data sent and received and the relevant register values. You can then
 * match the register values to the various constants in this file.
 */
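
/*
 * Illustrative sketch only (never compiled): one poll of the host-to-guest
 * TCLO channel, condensed from vmt_tclo_tick() above.  The guest announces
 * itself with VM_RPC_RESET_REPLY, then polls by sending an empty message
 * and asking for the length of any command the host has queued.
 */
#if 0
	struct vm_rpc tclo;
	char cmd[256];	/* sketch; the driver uses its VMT_RPC_BUFLEN buffer */
	uint32_t rlen;
	uint16_t ack;

	if (vm_rpc_open(&tclo, VM_RPC_OPEN_TCLO) == 0) {
		vm_rpc_send_str(&tclo, VM_RPC_RESET_REPLY);

		vm_rpc_send(&tclo, NULL, 0);	/* empty "ping" */
		if (vm_rpc_get_length(&tclo, &rlen, &ack) == 0 && rlen > 0) {
			if (rlen >= sizeof(cmd))
				rlen = sizeof(cmd) - 1;
			vm_rpc_get_data(&tclo, cmd, rlen, ack);
			/*
			 * Dispatch cmd (e.g. "reset", "ping", "OS_Halt") and
			 * answer with VM_RPC_REPLY_OK or VM_RPC_REPLY_ERROR.
			 */
		}
		vm_rpc_close(&tclo);
	}
#endif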