1 /* $NetBSD: vmt_subr.c,v 1.10 2024/05/09 12:09:59 pho Exp $ */
2 /* $OpenBSD: vmt.c,v 1.11 2011/01/27 21:29:25 dtucker Exp $ */
3
4 /*
5 * Copyright (c) 2007 David Crawshaw <david@zentus.com>
6 * Copyright (c) 2008 David Gwynne <dlg@openbsd.org>
7 *
8 * Permission to use, copy, modify, and distribute this software for any
9 * purpose with or without fee is hereby granted, provided that the above
10 * copyright notice and this permission notice appear in all copies.
11 *
12 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
13 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
14 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
15 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
16 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
19 */
20
21 /*
22 * Protocol reverse engineered by Ken Kato:
23 * https://sites.google.com/site/chitchatvmback/backdoor (dead link)
24 * https://web.archive.org/web/20230325103442/https://sites.google.com/site/chitchatvmback/backdoor (archive)
25 */
26
27 #include <sys/param.h>
28 #include <sys/types.h>
29 #include <sys/callout.h>
30 #include <sys/device.h>
31 #include <sys/endian.h>
32 #include <sys/kernel.h>
33 #include <sys/kmem.h>
34 #include <sys/module.h>
35 #include <sys/proc.h>
36 #include <sys/reboot.h>
37 #include <sys/socket.h>
38 #include <sys/sysctl.h>
39 #include <sys/syslog.h>
40 #include <sys/systm.h>
41 #include <sys/timetc.h>
42
43 #include <net/if.h>
44 #include <netinet/in.h>
45
46 #include <dev/sysmon/sysmonvar.h>
47 #include <dev/sysmon/sysmon_taskq.h>
48 #include <dev/vmt/vmtreg.h>
49 #include <dev/vmt/vmtvar.h>
50
51 /* #define VMT_DEBUG */
52
53 static int vmt_sysctl_setup_root(device_t);
54 static int vmt_sysctl_setup_clock_sync(device_t, const struct sysctlnode *);
55 static int vmt_sysctl_update_clock_sync_period(SYSCTLFN_PROTO);
56
57 static void vm_cmd(struct vm_backdoor *);
58 static void vm_ins(struct vm_backdoor *);
59 static void vm_outs(struct vm_backdoor *);
60
61 /* Functions for communicating with the VM Host. */
62 static int vm_rpc_open(struct vm_rpc *, uint32_t);
63 static int vm_rpc_close(struct vm_rpc *);
64 static int vm_rpc_send(const struct vm_rpc *, const uint8_t *, uint32_t);
65 static int vm_rpc_send_str(const struct vm_rpc *, const uint8_t *);
66 static int vm_rpc_get_length(const struct vm_rpc *, uint32_t *, uint16_t *);
67 static int vm_rpc_get_data(const struct vm_rpc *, char *, uint32_t, uint16_t);
68 static int vm_rpc_send_rpci_tx_buf(struct vmt_softc *, const uint8_t *, uint32_t);
69 static int vm_rpc_send_rpci_tx(struct vmt_softc *, const char *, ...)
70 __printflike(2, 3);
71 static int vm_rpci_response_successful(struct vmt_softc *);
72
73 static void vmt_tclo_state_change_success(struct vmt_softc *, int, char);
74 static void vmt_do_reboot(struct vmt_softc *);
75 static void vmt_do_shutdown(struct vmt_softc *);
76 static bool vmt_shutdown(device_t, int);
77
78 static void vmt_update_guest_info(struct vmt_softc *);
79 static void vmt_update_guest_uptime(struct vmt_softc *);
80 static void vmt_sync_guest_clock(struct vmt_softc *);
81
82 static void vmt_tick(void *);
83 static void vmt_clock_sync_tick(void *);
84 static void vmt_pswitch_event(void *);
85
86 static void vmt_tclo_tick(void *);
87 static int vmt_tclo_process(struct vmt_softc *, const char *);
88 static void vmt_tclo_reset(struct vmt_softc *);
89 static void vmt_tclo_ping(struct vmt_softc *);
90 static void vmt_tclo_halt(struct vmt_softc *);
91 static void vmt_tclo_reboot(struct vmt_softc *);
92 static void vmt_tclo_poweron(struct vmt_softc *);
93 static void vmt_tclo_suspend(struct vmt_softc *);
94 static void vmt_tclo_resume(struct vmt_softc *);
95 static void vmt_tclo_capreg(struct vmt_softc *);
96 static void vmt_tclo_broadcastip(struct vmt_softc *);
97
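/*
 * Dispatch table for TCLO requests sent by the host.  vmt_tclo_process()
 * matches the incoming request string against these names and runs the
 * corresponding handler; the table is terminated by a NULL entry.
 */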
98 struct vmt_tclo_rpc {
99 const char *name;
100 void (*cb)(struct vmt_softc *);
101 } vmt_tclo_rpc[] = {
102 /* Keep sorted by name (case-sensitive) */
103 { "Capabilities_Register", vmt_tclo_capreg },
104 { "OS_Halt", vmt_tclo_halt },
105 { "OS_PowerOn", vmt_tclo_poweron },
106 { "OS_Reboot", vmt_tclo_reboot },
107 { "OS_Resume", vmt_tclo_resume },
108 { "OS_Suspend", vmt_tclo_suspend },
109 { "Set_Option broadcastIP 1", vmt_tclo_broadcastip },
110 { "ping", vmt_tclo_ping },
111 { "reset", vmt_tclo_reset },
112 #if 0
113 /* Various unsupported commands */
114 { "Set_Option autohide 0" },
115 { "Set_Option copypaste 1" },
116 { "Set_Option enableDnD 1" },
117 { "Set_Option enableMessageBusTunnel 0" },
118 { "Set_Option linkRootHgfsShare 0" },
119 { "Set_Option mapRootHgfsShare 0" },
120 { "Set_Option synctime 1" },
121 { "Set_Option synctime.period 0" },
122 { "Set_Option time.synchronize.tools.enable 1" },
123 { "Set_Option time.synchronize.tools.percentCorrection 0" },
124 { "Set_Option time.synchronize.tools.slewCorrection 1" },
125 { "Set_Option time.synchronize.tools.startup 1" },
126 { "Set_Option toolScripts.afterPowerOn 1" },
127 { "Set_Option toolScripts.afterResume 1" },
128 { "Set_Option toolScripts.beforePowerOff 1" },
129 { "Set_Option toolScripts.beforeSuspend 1" },
130 { "Time_Synchronize 0" },
131 { "Vix_1_Relayed_Command \"38cdcae40e075d66\"" },
132 #endif
133 { NULL, NULL },
134 };
135
136 extern char hostname[MAXHOSTNAMELEN];
137
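/*
 * Issue a single backdoor command: load the magic number into eax, its
 * complement into ebx, the command into ecx and the backdoor command port
 * into edx, then trap to the hypervisor.  The results come back in the
 * same frame.
 */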
138 static void
139 vmt_probe_cmd(struct vm_backdoor *frame, uint16_t cmd)
140 {
141 memset(frame, 0, sizeof(*frame));
142
143 frame->eax = VM_MAGIC;
144 frame->ebx = ~VM_MAGIC & VM_REG_WORD_MASK;
145 frame->ecx = VM_REG_CMD(0xffff, cmd);
146 frame->edx = VM_REG_CMD(0, VM_PORT_CMD);
147
148 vm_cmd(frame);
149 }
150
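/*
 * Probe for the VMware backdoor.  GET_VERSION must echo the magic number in
 * ebx and return something other than 0xffffffff in eax; GET_SPEED is used
 * as an extra sanity check and must not leave the magic in eax.
 */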
151 bool
152 vmt_probe(void)
153 {
154 struct vm_backdoor frame;
155
156 vmt_probe_cmd(&frame, VM_CMD_GET_VERSION);
157 if (__SHIFTOUT(frame.eax, VM_REG_WORD_MASK) == 0xffffffff ||
158 __SHIFTOUT(frame.ebx, VM_REG_WORD_MASK) != VM_MAGIC)
159 return false;
160
161 vmt_probe_cmd(&frame, VM_CMD_GET_SPEED);
162 if (__SHIFTOUT(frame.eax, VM_REG_WORD_MASK) == VM_MAGIC)
163 return false;
164
165 return true;
166 }
167
168 void
169 vmt_common_attach(struct vmt_softc *sc)
170 {
171 device_t self;
172 struct vm_backdoor frame;
173 int rv;
174
175 self = sc->sc_dev;
176 sc->sc_log = NULL;
177
178 /* check again */
179 vmt_probe_cmd(&frame, VM_CMD_GET_VERSION);
180 if (__SHIFTOUT(frame.eax, VM_REG_WORD_MASK) == 0xffffffff ||
181 __SHIFTOUT(frame.ebx, VM_REG_WORD_MASK) != VM_MAGIC) {
182 aprint_error_dev(self, "failed to get VMware version\n");
183 return;
184 }
185
186 /* show uuid */
187 {
188 struct uuid uuid;
189 uint32_t u;
190
191 vmt_probe_cmd(&frame, VM_CMD_GET_BIOS_UUID);
192 uuid.time_low =
193 bswap32(__SHIFTOUT(frame.eax, VM_REG_WORD_MASK));
194 u = bswap32(__SHIFTOUT(frame.ebx, VM_REG_WORD_MASK));
195 uuid.time_mid = u >> 16;
196 uuid.time_hi_and_version = u;
197 u = bswap32(__SHIFTOUT(frame.ecx, VM_REG_WORD_MASK));
198 uuid.clock_seq_hi_and_reserved = u >> 24;
199 uuid.clock_seq_low = u >> 16;
200 uuid.node[0] = u >> 8;
201 uuid.node[1] = u;
202 u = bswap32(__SHIFTOUT(frame.edx, VM_REG_WORD_MASK));
203 uuid.node[2] = u >> 24;
204 uuid.node[3] = u >> 16;
205 uuid.node[4] = u >> 8;
206 uuid.node[5] = u;
207
208 uuid_snprintf(sc->sc_uuid, sizeof(sc->sc_uuid), &uuid);
209 aprint_verbose_dev(sc->sc_dev, "UUID: %s\n", sc->sc_uuid);
210 }
211
212 callout_init(&sc->sc_tick, 0);
213 callout_init(&sc->sc_tclo_tick, 0);
214 callout_init(&sc->sc_clock_sync_tick, 0);
215
216 sc->sc_clock_sync_period_seconds = VMT_CLOCK_SYNC_PERIOD_SECONDS;
217
218 rv = vmt_sysctl_setup_root(self);
219 if (rv != 0) {
220 aprint_error_dev(self, "failed to initialize sysctl "
221 "(err %d)\n", rv);
222 goto free;
223 }
224
225 sc->sc_rpc_buf = kmem_alloc(VMT_RPC_BUFLEN, KM_SLEEP);
226
227 if (vm_rpc_open(&sc->sc_tclo_rpc, VM_RPC_OPEN_TCLO) != 0) {
228 aprint_error_dev(self, "failed to open backdoor RPC channel "
229 "(TCLO protocol)\n");
230 goto free;
231 }
232 sc->sc_tclo_rpc_open = true;
233
234 /* don't know if this is important at all yet */
235 if (vm_rpc_send_rpci_tx(sc,
236 "tools.capability.hgfs_server toolbox 1") != 0) {
237 aprint_error_dev(self,
238 "failed to set HGFS server capability\n");
239 goto free;
240 }
241
242 pmf_device_register1(self, NULL, NULL, vmt_shutdown);
243
244 sysmon_task_queue_init();
245
246 sc->sc_ev_power.ev_smpsw.smpsw_type = PSWITCH_TYPE_POWER;
247 sc->sc_ev_power.ev_smpsw.smpsw_name = device_xname(self);
248 sc->sc_ev_power.ev_code = PSWITCH_EVENT_PRESSED;
249 sysmon_pswitch_register(&sc->sc_ev_power.ev_smpsw);
250 sc->sc_ev_reset.ev_smpsw.smpsw_type = PSWITCH_TYPE_RESET;
251 sc->sc_ev_reset.ev_smpsw.smpsw_name = device_xname(self);
252 sc->sc_ev_reset.ev_code = PSWITCH_EVENT_PRESSED;
253 sysmon_pswitch_register(&sc->sc_ev_reset.ev_smpsw);
254 sc->sc_ev_sleep.ev_smpsw.smpsw_type = PSWITCH_TYPE_SLEEP;
255 sc->sc_ev_sleep.ev_smpsw.smpsw_name = device_xname(self);
256 sc->sc_ev_sleep.ev_code = PSWITCH_EVENT_RELEASED;
257 sysmon_pswitch_register(&sc->sc_ev_sleep.ev_smpsw);
258 sc->sc_smpsw_valid = true;
259
260 callout_setfunc(&sc->sc_tick, vmt_tick, sc);
261 callout_schedule(&sc->sc_tick, hz);
262
263 callout_setfunc(&sc->sc_tclo_tick, vmt_tclo_tick, sc);
264 callout_schedule(&sc->sc_tclo_tick, hz);
265 sc->sc_tclo_ping = 1;
266
267 callout_setfunc(&sc->sc_clock_sync_tick, vmt_clock_sync_tick, sc);
268 callout_schedule(&sc->sc_clock_sync_tick,
269 mstohz(sc->sc_clock_sync_period_seconds * 1000));
270
271 vmt_sync_guest_clock(sc);
272
273 return;
274
275 free:
276 if (sc->sc_rpc_buf)
277 kmem_free(sc->sc_rpc_buf, VMT_RPC_BUFLEN);
278 pmf_device_register(self, NULL, NULL);
279 if (sc->sc_log)
280 sysctl_teardown(&sc->sc_log);
281 }
282
283 int
284 vmt_common_detach(struct vmt_softc *sc)
285 {
286 if (sc->sc_tclo_rpc_open)
287 vm_rpc_close(&sc->sc_tclo_rpc);
288
289 if (sc->sc_smpsw_valid) {
290 sysmon_pswitch_unregister(&sc->sc_ev_sleep.ev_smpsw);
291 sysmon_pswitch_unregister(&sc->sc_ev_reset.ev_smpsw);
292 sysmon_pswitch_unregister(&sc->sc_ev_power.ev_smpsw);
293 }
294
295 callout_halt(&sc->sc_tick, NULL);
296 callout_destroy(&sc->sc_tick);
297
298 callout_halt(&sc->sc_tclo_tick, NULL);
299 callout_destroy(&sc->sc_tclo_tick);
300
301 callout_halt(&sc->sc_clock_sync_tick, NULL);
302 callout_destroy(&sc->sc_clock_sync_tick);
303
304 if (sc->sc_rpc_buf)
305 kmem_free(sc->sc_rpc_buf, VMT_RPC_BUFLEN);
306
307 if (sc->sc_log) {
308 sysctl_teardown(&sc->sc_log);
309 sc->sc_log = NULL;
310 }
311
312 return 0;
313 }
314
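/*
 * Create the machdep.<device>.* sysctl subtree: a read-only "uuid" string
 * and the "clock_sync" nodes.
 */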
315 static int
316 vmt_sysctl_setup_root(device_t self)
317 {
318 const struct sysctlnode *machdep_node, *vmt_node;
319 struct vmt_softc *sc = device_private(self);
320 int rv;
321
322 rv = sysctl_createv(&sc->sc_log, 0, NULL, &machdep_node,
323 CTLFLAG_PERMANENT, CTLTYPE_NODE, "machdep", NULL,
324 NULL, 0, NULL, 0, CTL_MACHDEP, CTL_EOL);
325 if (rv != 0)
326 goto fail;
327
328 rv = sysctl_createv(&sc->sc_log, 0, &machdep_node, &vmt_node,
329 0, CTLTYPE_NODE, device_xname(self), NULL,
330 NULL, 0, NULL, 0, CTL_CREATE, CTL_EOL);
331 if (rv != 0)
332 goto fail;
333
334 rv = sysctl_createv(&sc->sc_log, 0, &vmt_node, NULL,
335 CTLFLAG_READONLY, CTLTYPE_STRING, "uuid",
336 SYSCTL_DESCR("UUID of virtual machine"),
337 NULL, 0, sc->sc_uuid, 0,
338 CTL_CREATE, CTL_EOL);
339
340 rv = vmt_sysctl_setup_clock_sync(self, vmt_node);
341 if (rv != 0)
342 goto fail;
343
344 return 0;
345
346 fail:
347 sysctl_teardown(&sc->sc_log);
348 sc->sc_log = NULL;
349
350 return rv;
351 }
352
353 static int
354 vmt_sysctl_setup_clock_sync(device_t self, const struct sysctlnode *root_node)
355 {
356 const struct sysctlnode *node, *period_node;
357 struct vmt_softc *sc = device_private(self);
358 int rv;
359
360 rv = sysctl_createv(&sc->sc_log, 0, &root_node, &node,
361 0, CTLTYPE_NODE, "clock_sync", NULL,
362 NULL, 0, NULL, 0, CTL_CREATE, CTL_EOL);
363 if (rv != 0)
364 return rv;
365
366 rv = sysctl_createv(&sc->sc_log, 0, &node, &period_node,
367 CTLFLAG_READWRITE, CTLTYPE_INT, "period",
368 SYSCTL_DESCR("Period, in seconds, at which to update the "
369 "guest's clock"),
370 vmt_sysctl_update_clock_sync_period, 0, (void *)sc, 0,
371 CTL_CREATE, CTL_EOL);
372 return rv;
373 }
374
375 static int
376 vmt_sysctl_update_clock_sync_period(SYSCTLFN_ARGS)
377 {
378 int error, period;
379 struct sysctlnode node;
380 struct vmt_softc *sc;
381
382 node = *rnode;
383 sc = (struct vmt_softc *)node.sysctl_data;
384
385 period = sc->sc_clock_sync_period_seconds;
386 node.sysctl_data = &period;
387 error = sysctl_lookup(SYSCTLFN_CALL(&node));
388 if (error || newp == NULL)
389 return error;
390
391 if (sc->sc_clock_sync_period_seconds != period) {
392 callout_halt(&sc->sc_clock_sync_tick, NULL);
393 sc->sc_clock_sync_period_seconds = period;
394 if (sc->sc_clock_sync_period_seconds > 0)
395 callout_schedule(&sc->sc_clock_sync_tick,
396 mstohz(sc->sc_clock_sync_period_seconds * 1000));
397 }
398 return 0;
399 }
400
401 static void
402 vmt_clock_sync_tick(void *xarg)
403 {
404 struct vmt_softc *sc = xarg;
405
406 vmt_sync_guest_clock(sc);
407
408 callout_schedule(&sc->sc_clock_sync_tick,
409 mstohz(sc->sc_clock_sync_period_seconds * 1000));
410 }
411
412 static void
413 vmt_update_guest_uptime(struct vmt_softc *sc)
414 {
415 /* host wants uptime in hundredths of a second */
416 if (vm_rpc_send_rpci_tx(sc, "SetGuestInfo %d %" PRId64 "00",
417 VM_GUEST_INFO_UPTIME, time_uptime) != 0) {
418 device_printf(sc->sc_dev, "unable to set guest uptime\n");
419 sc->sc_rpc_error = 1;
420 }
421 }
422
423 static void
424 vmt_update_guest_info(struct vmt_softc *sc)
425 {
426 if (strncmp(sc->sc_hostname, hostname, sizeof(sc->sc_hostname)) != 0) {
427 strlcpy(sc->sc_hostname, hostname, sizeof(sc->sc_hostname));
428 if (vm_rpc_send_rpci_tx(sc, "SetGuestInfo %d %s",
429 VM_GUEST_INFO_DNS_NAME, sc->sc_hostname) != 0) {
430 device_printf(sc->sc_dev, "unable to set hostname\n");
431 sc->sc_rpc_error = 1;
432 }
433 }
434
435 /*
436 * we're supposed to pass the full network address information back
437 * here, but that involves xdr (sunrpc) data encoding, which seems
438 * a bit unreasonable.
439 */
440
441 if (sc->sc_set_guest_os == 0) {
442 if (vm_rpc_send_rpci_tx(sc, "SetGuestInfo %d %s %s %s",
443 VM_GUEST_INFO_OS_NAME_FULL,
444 ostype, osrelease, machine_arch) != 0) {
445 device_printf(sc->sc_dev,
446 "unable to set full guest OS\n");
447 sc->sc_rpc_error = 1;
448 }
449
450 /*
451 * Host doesn't like it if we send an OS name it doesn't
452 * recognise, so use "other" for i386 and "other-64" for amd64.
453 */
454 if (vm_rpc_send_rpci_tx(sc, "SetGuestInfo %d %s",
455 VM_GUEST_INFO_OS_NAME, VM_OS_NAME) != 0) {
456 device_printf(sc->sc_dev, "unable to set guest OS\n");
457 sc->sc_rpc_error = 1;
458 }
459
460 sc->sc_set_guest_os = 1;
461 }
462 }
463
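/*
 * Read the host's wall-clock time with VM_CMD_GET_TIME_FULL (seconds split
 * across esi/edx, microseconds in ebx) and, if the call succeeds, step the
 * guest clock to it.
 */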
464 static void
465 vmt_sync_guest_clock(struct vmt_softc *sc)
466 {
467 struct vm_backdoor frame;
468 struct timespec ts;
469
470 memset(&frame, 0, sizeof(frame));
471 frame.eax = VM_MAGIC;
472 frame.ecx = VM_CMD_GET_TIME_FULL;
473 frame.edx = VM_REG_CMD(0, VM_PORT_CMD);
474 vm_cmd(&frame);
475
476 if (__SHIFTOUT(frame.eax, VM_REG_WORD_MASK) != 0xffffffff) {
477 ts.tv_sec = ((uint64_t)(
478 __SHIFTOUT(frame.esi, VM_REG_WORD_MASK) << 32)) |
479 __SHIFTOUT(frame.edx, VM_REG_WORD_MASK);
480 ts.tv_nsec = __SHIFTOUT(frame.ebx, VM_REG_WORD_MASK) * 1000;
481 tc_setclock(&ts);
482 }
483 }
484
485 static void
486 vmt_tick(void *xarg)
487 {
488 struct vmt_softc *sc = xarg;
489
490 vmt_update_guest_info(sc);
491 vmt_update_guest_uptime(sc);
492
493 callout_schedule(&sc->sc_tick, hz * 15);
494 }
495
496 static void
497 vmt_tclo_state_change_success(struct vmt_softc *sc, int success, char state)
498 {
499 if (vm_rpc_send_rpci_tx(sc, "tools.os.statechange.status %d %d",
500 success, state) != 0) {
501 device_printf(sc->sc_dev,
502 "unable to send state change result\n");
503 sc->sc_rpc_error = 1;
504 }
505 }
506
507 static void
508 vmt_do_shutdown(struct vmt_softc *sc)
509 {
510 vmt_tclo_state_change_success(sc, 1, VM_STATE_CHANGE_HALT);
511 vm_rpc_send_str(&sc->sc_tclo_rpc, VM_RPC_REPLY_OK);
512
513 device_printf(sc->sc_dev, "host requested shutdown\n");
514 sysmon_task_queue_sched(0, vmt_pswitch_event, &sc->sc_ev_power);
515 }
516
517 static void
518 vmt_do_reboot(struct vmt_softc *sc)
519 {
520 vmt_tclo_state_change_success(sc, 1, VM_STATE_CHANGE_REBOOT);
521 vm_rpc_send_str(&sc->sc_tclo_rpc, VM_RPC_REPLY_OK);
522
523 device_printf(sc->sc_dev, "host requested reboot\n");
524 sysmon_task_queue_sched(0, vmt_pswitch_event, &sc->sc_ev_reset);
525 }
526
527 static void
528 vmt_do_resume(struct vmt_softc *sc)
529 {
530 device_printf(sc->sc_dev, "guest resuming from suspended state\n");
531
532 vmt_sync_guest_clock(sc);
533
534 /* force guest info update */
535 sc->sc_hostname[0] = '\0';
536 sc->sc_set_guest_os = 0;
537 vmt_update_guest_info(sc);
538
539 vmt_tclo_state_change_success(sc, 1, VM_STATE_CHANGE_RESUME);
540 if (vm_rpc_send_str(&sc->sc_tclo_rpc, VM_RPC_REPLY_OK) != 0) {
541 device_printf(sc->sc_dev, "error sending resume response\n");
542 sc->sc_rpc_error = 1;
543 }
544
545 sysmon_task_queue_sched(0, vmt_pswitch_event, &sc->sc_ev_sleep);
546 }
547
548 static bool
549 vmt_shutdown(device_t self, int flags)
550 {
551 struct vmt_softc *sc = device_private(self);
552
553 if (vm_rpc_send_rpci_tx(sc,
554 "tools.capability.hgfs_server toolbox 0") != 0) {
555 device_printf(sc->sc_dev,
556 "failed to disable hgfs server capability\n");
557 }
558
559 if (vm_rpc_send(&sc->sc_tclo_rpc, NULL, 0) != 0) {
560 device_printf(sc->sc_dev, "failed to send shutdown ping\n");
561 }
562
563 vm_rpc_close(&sc->sc_tclo_rpc);
564
565 return true;
566 }
567
568 static void
569 vmt_pswitch_event(void *xarg)
570 {
571 struct vmt_event *ev = xarg;
572
573 sysmon_pswitch_event(&ev->ev_smpsw, ev->ev_code);
574 }
575
576 static void
577 vmt_tclo_reset(struct vmt_softc *sc)
578 {
579
580 if (sc->sc_rpc_error != 0) {
581 device_printf(sc->sc_dev, "resetting rpc\n");
582 vm_rpc_close(&sc->sc_tclo_rpc);
583
584 /* reopen and send the reset reply next time around */
585 return;
586 }
587
588 if (vm_rpc_send_str(&sc->sc_tclo_rpc, VM_RPC_RESET_REPLY) != 0) {
589 device_printf(sc->sc_dev, "failed to send reset reply\n");
590 sc->sc_rpc_error = 1;
591 }
592
593 }
594
595 static void
596 vmt_tclo_ping(struct vmt_softc *sc)
597 {
598
599 vmt_update_guest_info(sc);
600 if (vm_rpc_send_str(&sc->sc_tclo_rpc, VM_RPC_REPLY_OK) != 0) {
601 device_printf(sc->sc_dev, "error sending ping response\n");
602 sc->sc_rpc_error = 1;
603 }
604 }
605
606 static void
607 vmt_tclo_halt(struct vmt_softc *sc)
608 {
609
610 vmt_do_shutdown(sc);
611 }
612
613 static void
614 vmt_tclo_reboot(struct vmt_softc *sc)
615 {
616
617 vmt_do_reboot(sc);
618 }
619
620 static void
621 vmt_tclo_poweron(struct vmt_softc *sc)
622 {
623
624 vmt_tclo_state_change_success(sc, 1, VM_STATE_CHANGE_POWERON);
625 if (vm_rpc_send_str(&sc->sc_tclo_rpc, VM_RPC_REPLY_OK) != 0) {
626 device_printf(sc->sc_dev, "error sending poweron response\n");
627 sc->sc_rpc_error = 1;
628 }
629 }
630
631 static void
632 vmt_tclo_suspend(struct vmt_softc *sc)
633 {
634
635 log(LOG_KERN | LOG_NOTICE,
636 "VMware guest entering suspended state\n");
637
638 vmt_tclo_state_change_success(sc, 1, VM_STATE_CHANGE_SUSPEND);
639 if (vm_rpc_send_str(&sc->sc_tclo_rpc, VM_RPC_REPLY_OK) != 0) {
640 device_printf(sc->sc_dev, "error sending suspend response\n");
641 sc->sc_rpc_error = 1;
642 }
643 }
644
645 static void
646 vmt_tclo_resume(struct vmt_softc *sc)
647 {
648
649 vmt_do_resume(sc); /* XXX msaitoh extract */
650 }
651
652 static void
653 vmt_tclo_capreg(struct vmt_softc *sc)
654 {
655
656 /* don't know if this is important at all */
657 if (vm_rpc_send_rpci_tx(sc,
658 "vmx.capability.unified_loop toolbox") != 0) {
659 device_printf(sc->sc_dev, "unable to set unified loop\n");
660 sc->sc_rpc_error = 1;
661 }
662 if (vm_rpci_response_successful(sc) == 0) {
663 device_printf(sc->sc_dev,
664 "host rejected unified loop setting\n");
665 }
666
667 /* the trailing space is apparently important here */
668 if (vm_rpc_send_rpci_tx(sc,
669 "tools.capability.statechange ") != 0) {
670 device_printf(sc->sc_dev,
671 "unable to send statechange capability\n");
672 sc->sc_rpc_error = 1;
673 }
674 if (vm_rpci_response_successful(sc) == 0) {
675 device_printf(sc->sc_dev,
676 "host rejected statechange capability\n");
677 }
678
679 if (vm_rpc_send_rpci_tx(sc,
680 "tools.set.version %u", VM_VERSION_UNMANAGED) != 0) {
681 device_printf(sc->sc_dev, "unable to set tools version\n");
682 sc->sc_rpc_error = 1;
683 }
684
685 vmt_update_guest_uptime(sc);
686
687 if (vm_rpc_send_str(&sc->sc_tclo_rpc, VM_RPC_REPLY_OK) != 0) {
688 device_printf(sc->sc_dev,
689 "error sending capabilities_register response\n");
690 sc->sc_rpc_error = 1;
691 }
692 }
693
694 static void
695 vmt_tclo_broadcastip(struct vmt_softc *sc)
696 {
697 struct ifaddr *iface_addr = NULL;
698 struct ifnet *iface;
699 struct sockaddr_in *guest_ip;
700 int s;
701 struct psref psref;
702
703 /* find first available ipv4 address */
704 guest_ip = NULL;
705 s = pserialize_read_enter();
706 IFNET_READER_FOREACH(iface) {
707
708 /* skip loopback */
709 if (strncmp(iface->if_xname, "lo", 2) == 0 &&
710 iface->if_xname[2] >= '0' &&
711 iface->if_xname[2] <= '9') {
712 continue;
713 }
714
715 IFADDR_READER_FOREACH(iface_addr, iface) {
716 if (iface_addr->ifa_addr->sa_family != AF_INET) {
717 continue;
718 }
719
720 guest_ip = satosin(iface_addr->ifa_addr);
721 ifa_acquire(iface_addr, &psref);
722 goto got;
723 }
724 }
725 got:
726 pserialize_read_exit(s);
727
728 if (guest_ip != NULL) {
729 if (vm_rpc_send_rpci_tx(sc, "info-set guestinfo.ip %s",
730 inet_ntoa(guest_ip->sin_addr)) != 0) {
731 device_printf(sc->sc_dev,
732 "unable to send guest IP address\n");
733 sc->sc_rpc_error = 1;
734 }
735 ifa_release(iface_addr, &psref);
736
737 if (vm_rpc_send_str(&sc->sc_tclo_rpc,
738 VM_RPC_REPLY_OK) != 0) {
739 device_printf(sc->sc_dev,
740 "error sending broadcastIP response\n");
741 sc->sc_rpc_error = 1;
742 }
743 } else {
744 if (vm_rpc_send_str(&sc->sc_tclo_rpc,
745 VM_RPC_REPLY_ERROR_IP_ADDR) != 0) {
746 device_printf(sc->sc_dev,
747 "error sending broadcastIP"
748 " error response\n");
749 sc->sc_rpc_error = 1;
750 }
751 }
752 }
753
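/*
 * Dispatch a TCLO request: look the command up in the vmt_tclo_rpc table
 * and run its handler.  Unknown commands are logged and rejected.
 */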
754 int
755 vmt_tclo_process(struct vmt_softc *sc, const char *name)
756 {
757 int i;
758
759 /* Search for rpc command and call handler */
760 for (i = 0; vmt_tclo_rpc[i].name != NULL; i++) {
761 if (strcmp(vmt_tclo_rpc[i].name, sc->sc_rpc_buf) == 0) {
762 vmt_tclo_rpc[i].cb(sc);
763 return (0);
764 }
765 }
766
767 device_printf(sc->sc_dev, "unknown command: \"%s\"\n", name);
768
769 return (-1);
770 }
771
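/*
 * Poll the TCLO channel.  Reopen it if it was closed after an error, ping
 * the host, and read any pending request into sc_rpc_buf for
 * vmt_tclo_process().  Normally runs once a second; it reschedules itself
 * immediately while the host has more messages queued and backs off to 15
 * seconds if the channel cannot be reopened.
 */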
772 static void
773 vmt_tclo_tick(void *xarg)
774 {
775 struct vmt_softc *sc = xarg;
776 u_int32_t rlen;
777 u_int16_t ack;
778 int delay;
779
780 /* By default, poll every second for new messages */
781 delay = 1;
782
783 /* reopen tclo channel if it's currently closed */
784 if (sc->sc_tclo_rpc.channel == 0 &&
785 sc->sc_tclo_rpc.cookie1 == 0 &&
786 sc->sc_tclo_rpc.cookie2 == 0) {
787 if (vm_rpc_open(&sc->sc_tclo_rpc, VM_RPC_OPEN_TCLO) != 0) {
788 device_printf(sc->sc_dev,
789 "unable to reopen TCLO channel\n");
790 delay = 15;
791 goto out;
792 }
793
794 if (vm_rpc_send_str(&sc->sc_tclo_rpc,
795 VM_RPC_RESET_REPLY) != 0) {
796 device_printf(sc->sc_dev,
797 "failed to send reset reply\n");
798 sc->sc_rpc_error = 1;
799 goto out;
800 } else {
801 sc->sc_rpc_error = 0;
802 }
803 }
804
805 if (sc->sc_tclo_ping) {
806 if (vm_rpc_send(&sc->sc_tclo_rpc, NULL, 0) != 0) {
807 device_printf(sc->sc_dev,
808 "failed to send TCLO outgoing ping\n");
809 sc->sc_rpc_error = 1;
810 goto out;
811 }
812 }
813
814 if (vm_rpc_get_length(&sc->sc_tclo_rpc, &rlen, &ack) != 0) {
815 device_printf(sc->sc_dev,
816 "failed to get length of incoming TCLO data\n");
817 sc->sc_rpc_error = 1;
818 goto out;
819 }
820
821 if (rlen == 0) {
822 sc->sc_tclo_ping = 1;
823 goto out;
824 }
825
826 if (rlen >= VMT_RPC_BUFLEN) {
827 rlen = VMT_RPC_BUFLEN - 1;
828 }
829 if (vm_rpc_get_data(&sc->sc_tclo_rpc, sc->sc_rpc_buf, rlen, ack) != 0) {
830 device_printf(sc->sc_dev,
831 "failed to get incoming TCLO data\n");
832 sc->sc_rpc_error = 1;
833 goto out;
834 }
835 sc->sc_tclo_ping = 0;
836
837 /* The VM host can queue multiple messages; continue without delay */
838 delay = 0;
839
840 #ifdef VMT_DEBUG
841 printf("vmware: received message '%s'\n", sc->sc_rpc_buf);
842 #endif
843
844 if (vmt_tclo_process(sc, sc->sc_rpc_buf) != 0) {
845 if (vm_rpc_send_str(&sc->sc_tclo_rpc,
846 VM_RPC_REPLY_ERROR) != 0) {
847 device_printf(sc->sc_dev,
848 "error sending unknown command reply\n");
849 sc->sc_rpc_error = 1;
850 }
851 }
852
853 if (sc->sc_rpc_error == 1) {
854 /* On error, give time to recover and wait a second */
855 delay = 1;
856 }
857
858 out:
859 callout_schedule(&sc->sc_tclo_tick, hz * delay);
860 }
861
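/*
 * Thin wrappers around the machine-dependent BACKDOOR_OP() primitive:
 * vm_cmd() performs a single command/response exchange, while vm_ins() and
 * vm_outs() do the bulk data transfers used by the enhanced RPC receive and
 * send paths.
 */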
862 static void
863 vm_cmd(struct vm_backdoor *frame)
864 {
865 BACKDOOR_OP(BACKDOOR_OP_CMD, frame);
866 }
867
868 static void
869 vm_ins(struct vm_backdoor *frame)
870 {
871 BACKDOOR_OP(BACKDOOR_OP_IN, frame);
872 }
873
874 static void
875 vm_outs(struct vm_backdoor *frame)
876 {
877 BACKDOOR_OP(BACKDOOR_OP_OUT, frame);
878 }
879
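/*
 * Open a backdoor RPC channel using the given protocol (TCLO or RPCI).
 * On success the high half of ecx is 1; the channel number is taken from
 * the high half of edx and the session cookies from esi/edi.
 */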
880 static int
881 vm_rpc_open(struct vm_rpc *rpc, uint32_t proto)
882 {
883 struct vm_backdoor frame;
884
885 memset(&frame, 0, sizeof(frame));
886 frame.eax = VM_MAGIC;
887 frame.ebx = proto | VM_RPC_FLAG_COOKIE;
888 frame.ecx = VM_REG_CMD_RPC(VM_RPC_OPEN);
889 frame.edx = VM_REG_PORT_CMD(0);
890
891 vm_cmd(&frame);
892
893 if (__SHIFTOUT(frame.ecx, VM_REG_HIGH_MASK) != 1 ||
894 __SHIFTOUT(frame.edx, VM_REG_LOW_MASK) != 0) {
895 /* open-vm-tools retries without VM_RPC_FLAG_COOKIE here.. */
896 printf("vmware: open failed, eax=%#"PRIxREGISTER
897 ", ecx=%#"PRIxREGISTER", edx=%#"PRIxREGISTER"\n",
898 frame.eax, frame.ecx, frame.edx);
899 return EIO;
900 }
901
902 rpc->channel = __SHIFTOUT(frame.edx, VM_REG_HIGH_MASK);
903 rpc->cookie1 = __SHIFTOUT(frame.esi, VM_REG_WORD_MASK);
904 rpc->cookie2 = __SHIFTOUT(frame.edi, VM_REG_WORD_MASK);
905
906 return 0;
907 }
908
909 static int
910 vm_rpc_close(struct vm_rpc *rpc)
911 {
912 struct vm_backdoor frame;
913
914 memset(&frame, 0, sizeof(frame));
915 frame.eax = VM_MAGIC;
916 frame.ebx = 0;
917 frame.ecx = VM_REG_CMD_RPC(VM_RPC_CLOSE);
918 frame.edx = VM_REG_PORT_CMD(rpc->channel);
919 frame.edi = rpc->cookie2;
920 frame.esi = rpc->cookie1;
921
922 vm_cmd(&frame);
923
924 if (__SHIFTOUT(frame.ecx, VM_REG_HIGH_MASK) == 0 ||
925 __SHIFTOUT(frame.ecx, VM_REG_LOW_MASK) != 0) {
926 printf("vmware: close failed, "
927 "eax=%#"PRIxREGISTER", ecx=%#"PRIxREGISTER"\n",
928 frame.eax, frame.ecx);
929 return EIO;
930 }
931
932 rpc->channel = 0;
933 rpc->cookie1 = 0;
934 rpc->cookie2 = 0;
935
936 return 0;
937 }
938
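/*
 * Transmit a message on an open channel: announce its length with
 * VM_RPC_SET_LENGTH, then push the payload with a bulk "out" transfer on
 * the enhanced RPC port.  A zero-length send acts as a bare ping and skips
 * the data phase.
 */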
939 static int
940 vm_rpc_send(const struct vm_rpc *rpc, const uint8_t *buf, uint32_t length)
941 {
942 struct vm_backdoor frame;
943
944 /* Send the length of the command. */
945 memset(&frame, 0, sizeof(frame));
946 frame.eax = VM_MAGIC;
947 frame.ebx = length;
948 frame.ecx = VM_REG_CMD_RPC(VM_RPC_SET_LENGTH);
949 frame.edx = VM_REG_PORT_CMD(rpc->channel);
950 frame.esi = rpc->cookie1;
951 frame.edi = rpc->cookie2;
952
953 vm_cmd(&frame);
954
955 if ((__SHIFTOUT(frame.ecx, VM_REG_HIGH_MASK) & VM_RPC_REPLY_SUCCESS) ==
956 0) {
957 printf("vmware: sending length failed, "
958 "eax=%#"PRIxREGISTER", ecx=%#"PRIxREGISTER"\n",
959 frame.eax, frame.ecx);
960 return EIO;
961 }
962
963 if (length == 0)
964 return 0; /* Only need to poke once if command is null. */
965
966 /* Send the command using enhanced RPC. */
967 memset(&frame, 0, sizeof(frame));
968 frame.eax = VM_MAGIC;
969 frame.ebx = VM_RPC_ENH_DATA;
970 frame.ecx = length;
971 frame.edx = VM_REG_PORT_RPC(rpc->channel);
972 frame.ebp = rpc->cookie1;
973 frame.edi = rpc->cookie2;
974 frame.esi = (register_t)buf;
975
976 vm_outs(&frame);
977
978 if (__SHIFTOUT(frame.ebx, VM_REG_WORD_MASK) != VM_RPC_ENH_DATA) {
979 /* open-vm-tools retries on VM_RPC_REPLY_CHECKPOINT */
980 printf("vmware: send failed, ebx=%#"PRIxREGISTER"\n",
981 frame.ebx);
982 return EIO;
983 }
984
985 return 0;
986 }
987
988 static int
989 vm_rpc_send_str(const struct vm_rpc *rpc, const uint8_t *str)
990 {
991 return vm_rpc_send(rpc, str, strlen(str));
992 }
993
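/*
 * Receive a pending message: bulk-read "length" bytes into "data" via
 * enhanced RPC, NUL-terminate it (so the buffer must hold length + 1
 * bytes), then acknowledge receipt with VM_RPC_GET_END and the message's
 * data id.
 */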
994 static int
995 vm_rpc_get_data(const struct vm_rpc *rpc, char *data, uint32_t length,
996 uint16_t dataid)
997 {
998 struct vm_backdoor frame;
999
1000 /* Get data using enhanced RPC. */
1001 memset(&frame, 0, sizeof(frame));
1002 frame.eax = VM_MAGIC;
1003 frame.ebx = VM_RPC_ENH_DATA;
1004 frame.ecx = length;
1005 frame.edx = VM_REG_PORT_RPC(rpc->channel);
1006 frame.esi = rpc->cookie1;
1007 frame.edi = (register_t)data;
1008 frame.ebp = rpc->cookie2;
1009
1010 vm_ins(&frame);
1011
1012 /* NUL-terminate the data */
1013 data[length] = '\0';
1014
1015 if (__SHIFTOUT(frame.ebx, VM_REG_WORD_MASK) != VM_RPC_ENH_DATA) {
1016 printf("vmware: get data failed, ebx=%#"PRIxREGISTER"\n",
1017 frame.ebx);
1018 return EIO;
1019 }
1020
1021 /* Acknowledge data received. */
1022 memset(&frame, 0, sizeof(frame));
1023 frame.eax = VM_MAGIC;
1024 frame.ebx = dataid;
1025 frame.ecx = VM_REG_CMD_RPC(VM_RPC_GET_END);
1026 frame.edx = VM_REG_PORT_CMD(rpc->channel);
1027 frame.esi = rpc->cookie1;
1028 frame.edi = rpc->cookie2;
1029
1030 vm_cmd(&frame);
1031
1032 if (__SHIFTOUT(frame.ecx, VM_REG_HIGH_MASK) == 0) {
1033 printf("vmware: ack data failed, "
1034 "eax=%#"PRIxREGISTER", ecx=%#"PRIxREGISTER"\n",
1035 frame.eax, frame.ecx);
1036 return EIO;
1037 }
1038
1039 return 0;
1040 }
1041
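/*
 * Ask the host whether a message is pending on the channel.  *length is
 * set to 0 if there is nothing to receive; otherwise it gets the message
 * size and *dataid the id that must be echoed back when the data is
 * acknowledged.
 */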
1042 static int
1043 vm_rpc_get_length(const struct vm_rpc *rpc, uint32_t *length, uint16_t *dataid)
1044 {
1045 struct vm_backdoor frame;
1046
1047 memset(&frame, 0, sizeof(frame));
1048 frame.eax = VM_MAGIC;
1049 frame.ebx = 0;
1050 frame.ecx = VM_REG_CMD_RPC(VM_RPC_GET_LENGTH);
1051 frame.edx = VM_REG_PORT_CMD(rpc->channel);
1052 frame.esi = rpc->cookie1;
1053 frame.edi = rpc->cookie2;
1054
1055 vm_cmd(&frame);
1056
1057 if ((__SHIFTOUT(frame.ecx, VM_REG_HIGH_MASK) & VM_RPC_REPLY_SUCCESS) ==
1058 0) {
1059 printf("vmware: get length failed, "
1060 "eax=%#"PRIxREGISTER", ecx=%#"PRIxREGISTER"\n",
1061 frame.eax, frame.ecx);
1062 return EIO;
1063 }
1064 if ((__SHIFTOUT(frame.ecx, VM_REG_HIGH_MASK) & VM_RPC_REPLY_DORECV) ==
1065 0) {
1066 *length = 0;
1067 *dataid = 0;
1068 } else {
1069 *length = __SHIFTOUT(frame.ebx, VM_REG_WORD_MASK);
1070 *dataid = __SHIFTOUT(frame.edx, VM_REG_HIGH_MASK);
1071 }
1072
1073 return 0;
1074 }
1075
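/* RPCI replies begin with "1 " when the host accepted the request. */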
1076 static int
1077 vm_rpci_response_successful(struct vmt_softc *sc)
1078 {
1079 return (sc->sc_rpc_buf[0] == '1' && sc->sc_rpc_buf[1] == ' ');
1080 }
1081
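/*
 * Issue one RPCI request and capture the host's reply in sc_rpc_buf:
 * open a transient RPCI channel, send the request, read back the response
 * (truncated to the buffer size if necessary) and close the channel again.
 */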
1082 static int
1083 vm_rpc_send_rpci_tx_buf(struct vmt_softc *sc, const uint8_t *buf,
1084 uint32_t length)
1085 {
1086 struct vm_rpc rpci;
1087 u_int32_t rlen;
1088 u_int16_t ack;
1089 int result = 0;
1090
1091 if (vm_rpc_open(&rpci, VM_RPC_OPEN_RPCI) != 0) {
1092 device_printf(sc->sc_dev, "rpci channel open failed\n");
1093 return EIO;
1094 }
1095
1096 if (vm_rpc_send(&rpci, sc->sc_rpc_buf, length) != 0) {
1097 device_printf(sc->sc_dev, "unable to send rpci command\n");
1098 result = EIO;
1099 goto out;
1100 }
1101
1102 if (vm_rpc_get_length(&rpci, &rlen, &ack) != 0) {
1103 device_printf(sc->sc_dev,
1104 "failed to get length of rpci response data\n");
1105 result = EIO;
1106 goto out;
1107 }
1108
1109 if (rlen > 0) {
1110 if (rlen >= VMT_RPC_BUFLEN) {
1111 rlen = VMT_RPC_BUFLEN - 1;
1112 }
1113
1114 if (vm_rpc_get_data(&rpci, sc->sc_rpc_buf, rlen, ack) != 0) {
1115 device_printf(sc->sc_dev,
1116 "failed to get rpci response data\n");
1117 result = EIO;
1118 goto out;
1119 }
1120 }
1121
1122 out:
1123 if (vm_rpc_close(&rpci) != 0) {
1124 device_printf(sc->sc_dev, "unable to close rpci channel\n");
1125 }
1126
1127 return result;
1128 }
1129
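/*
 * printf-style front end for RPCI requests: format the command into
 * sc_rpc_buf and hand it to vm_rpc_send_rpci_tx_buf().  Fails if the
 * formatted command does not fit in the RPC buffer.
 */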
1130 static int
1131 vm_rpc_send_rpci_tx(struct vmt_softc *sc, const char *fmt, ...)
1132 {
1133 va_list args;
1134 int len;
1135
1136 va_start(args, fmt);
1137 len = vsnprintf(sc->sc_rpc_buf, VMT_RPC_BUFLEN, fmt, args);
1138 va_end(args);
1139
1140 if (len >= VMT_RPC_BUFLEN) {
1141 device_printf(sc->sc_dev,
1142 "rpci command didn't fit in buffer\n");
1143 return EIO;
1144 }
1145
1146 return vm_rpc_send_rpci_tx_buf(sc, sc->sc_rpc_buf, len);
1147 }
1148
1149 #if 0
1150 struct vm_backdoor frame;
1151
1152 memset(&frame, 0, sizeof(frame));
1153
1154 frame.eax = VM_MAGIC;
1155 frame.ecx = VM_CMD_GET_VERSION;
1156 frame.edx = VM_PORT_CMD;
1157
1158 printf("\n");
1159 printf("eax %#"PRIxREGISTER"\n", frame.eax);
1160 printf("ebx %#"PRIxREGISTER"\n", frame.ebx);
1161 printf("ecx %#"PRIxREGISTER"\n", frame.ecx);
1162 printf("edx %#"PRIxREGISTER"\n", frame.edx)
1163 printf("ebp %#"PRIxREGISTER"\n", frame.ebp);
1164 printf("edi %#"PRIxREGISTER"\n", frame.edi);
1165 printf("esi %#"PRIxREGISTER"\n", frame.esi);
1166
1167 vm_cmd(&frame);
1168
1169 printf("-\n");
1170 printf("eax %#"PRIxREGISTER"\n", frame.eax);
1171 printf("ebx %#"PRIxREGISTER"\n", frame.ebx);
1172 printf("ecx %#"PRIxREGISTER"\n", frame.ecx);
1173 printf("edx %#"PRIxREGISTER"\n", frame.edx);
1174 printf("ebp %#"PRIxREGISTER"\n", frame.ebp);
1175 printf("edi %#"PRIxREGISTER"\n", frame.edi);
1176 printf("esi %#"PRIxREGISTER"\n", frame.esi);
1177 #endif
1178
1179 /*
1180 * Notes on tracing backdoor activity in vmware-guestd:
1181 *
1182 * - Find the addresses of the inl / rep insb / rep outsb
1183 * instructions used to perform backdoor operations.
1184 * One way to do this is to disassemble vmware-guestd:
1185 *
1186 * $ objdump -S /emul/freebsd/sbin/vmware-guestd > vmware-guestd.S
1187 *
1188 * and search for '<tab>in ' in the resulting file. The rep insb and
1189 * rep outsb code is directly below that.
1190 *
1191 * - Run vmware-guestd under gdb, setting up breakpoints as follows:
1192 * (the addresses shown here are the ones from VMware-server-1.0.10-203137,
1193 * the last version that actually works in FreeBSD emulation on OpenBSD)
1194 *
1195 * break *0x805497b (address of 'in' instruction)
1196 * commands 1
1197 * silent
1198 * echo INOUT\n
1199 * print/x $ecx
1200 * print/x $ebx
1201 * print/x $edx
1202 * continue
1203 * end
1204 * break *0x805497c (address of instruction after 'in')
1205 * commands 2
1206 * silent
1207 * echo ===\n
1208 * print/x $ecx
1209 * print/x $ebx
1210 * print/x $edx
1211 * echo \n
1212 * continue
1213 * end
1214 * break *0x80549b7 (address of instruction before 'rep insb')
1215 * commands 3
1216 * silent
1217 * set variable $inaddr = $edi
1218 * set variable $incount = $ecx
1219 * continue
1220 * end
1221 * break *0x80549ba (address of instruction after 'rep insb')
1222 * commands 4
1223 * silent
1224 * echo IN\n
1225 * print $incount
1226 * x/s $inaddr
1227 * echo \n
1228 * continue
1229 * end
1230 * break *0x80549fb (address of instruction before 'rep outsb')
1231 * commands 5
1232 * silent
1233 * echo OUT\n
1234 * print $ecx
1235 * x/s $esi
1236 * echo \n
1237 * continue
1238 * end
1239 *
1240 * This will produce a log of the backdoor operations, including the
1241 * data sent and received and the relevant register values. You can then
1242 * match the register values to the various constants in this file.
1243 */
1244