xref: /netbsd-src/sys/dev/vmt/vmt_subr.c (revision 627f7eb200a4419d89b531d55fccd2ee3ffdcde0)
1 /* $NetBSD: vmt_subr.c,v 1.3 2021/03/27 21:23:14 ryo Exp $ */
2 /* $OpenBSD: vmt.c,v 1.11 2011/01/27 21:29:25 dtucker Exp $ */
3 
4 /*
5  * Copyright (c) 2007 David Crawshaw <david@zentus.com>
6  * Copyright (c) 2008 David Gwynne <dlg@openbsd.org>
7  *
8  * Permission to use, copy, modify, and distribute this software for any
9  * purpose with or without fee is hereby granted, provided that the above
10  * copyright notice and this permission notice appear in all copies.
11  *
12  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
13  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
14  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
15  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
16  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
19  */
20 
21 /*
22  * Protocol reverse engineered by Ken Kato:
23  * https://sites.google.com/site/chitchatvmback/backdoor
24  */
25 
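/*
 * Rough summary of the backdoor convention used throughout this file
 * (see vmtreg.h for the exact register encodings): the guest loads
 * VM_MAGIC into EAX, the command (and, for some commands, a parameter)
 * into ECX, and the backdoor I/O port into EDX, then performs the port
 * access through the machine-dependent BACKDOOR_OP() primitive.  The
 * hypervisor intercepts the access and returns its results in the same
 * registers.
 */
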
26 #include <sys/param.h>
27 #include <sys/types.h>
28 #include <sys/callout.h>
29 #include <sys/device.h>
30 #include <sys/endian.h>
31 #include <sys/kernel.h>
32 #include <sys/kmem.h>
33 #include <sys/module.h>
34 #include <sys/proc.h>
35 #include <sys/reboot.h>
36 #include <sys/socket.h>
37 #include <sys/sysctl.h>
38 #include <sys/syslog.h>
39 #include <sys/systm.h>
40 #include <sys/timetc.h>
41 
42 #include <net/if.h>
43 #include <netinet/in.h>
44 
45 #include <dev/sysmon/sysmonvar.h>
46 #include <dev/sysmon/sysmon_taskq.h>
47 #include <dev/vmt/vmtreg.h>
48 #include <dev/vmt/vmtvar.h>
49 
50 /* #define VMT_DEBUG */
51 
52 static int vmt_sysctl_setup_root(device_t);
53 static int vmt_sysctl_setup_clock_sync(device_t, const struct sysctlnode *);
54 static int vmt_sysctl_update_clock_sync_period(SYSCTLFN_PROTO);
55 
56 static void vm_cmd(struct vm_backdoor *);
57 static void vm_ins(struct vm_backdoor *);
58 static void vm_outs(struct vm_backdoor *);
59 
60 /* Functions for communicating with the VM Host. */
61 static int vm_rpc_open(struct vm_rpc *, uint32_t);
62 static int vm_rpc_close(struct vm_rpc *);
63 static int vm_rpc_send(const struct vm_rpc *, const uint8_t *, uint32_t);
64 static int vm_rpc_send_str(const struct vm_rpc *, const uint8_t *);
65 static int vm_rpc_get_length(const struct vm_rpc *, uint32_t *, uint16_t *);
66 static int vm_rpc_get_data(const struct vm_rpc *, char *, uint32_t, uint16_t);
67 static int vm_rpc_send_rpci_tx_buf(struct vmt_softc *, const uint8_t *, uint32_t);
68 static int vm_rpc_send_rpci_tx(struct vmt_softc *, const char *, ...)
69     __printflike(2, 3);
70 static int vm_rpci_response_successful(struct vmt_softc *);
71 
72 static void vmt_tclo_state_change_success(struct vmt_softc *, int, char);
73 static void vmt_do_reboot(struct vmt_softc *);
74 static void vmt_do_shutdown(struct vmt_softc *);
75 
76 static void vmt_update_guest_info(struct vmt_softc *);
77 static void vmt_update_guest_uptime(struct vmt_softc *);
78 static void vmt_sync_guest_clock(struct vmt_softc *);
79 
80 static void vmt_tick(void *);
81 static void vmt_tclo_tick(void *);
82 static void vmt_clock_sync_tick(void *);
83 static bool vmt_shutdown(device_t, int);
84 static void vmt_pswitch_event(void *);
85 
86 extern char hostname[MAXHOSTNAMELEN];
87 
88 static void
89 vmt_probe_cmd(struct vm_backdoor *frame, uint16_t cmd)
90 {
91 	memset(frame, 0, sizeof(*frame));
92 
93 	frame->eax = VM_MAGIC;
94 	frame->ebx = ~VM_MAGIC & VM_REG_WORD_MASK;
95 	frame->ecx = VM_REG_CMD(0xffff, cmd);
96 	frame->edx = VM_REG_CMD(0, VM_PORT_CMD);
97 
98 	vm_cmd(frame);
99 }
100 
101 bool
102 vmt_probe(void)
103 {
104 	struct vm_backdoor frame;
105 
106 	vmt_probe_cmd(&frame, VM_CMD_GET_VERSION);
107 	if (__SHIFTOUT(frame.eax, VM_REG_WORD_MASK) == 0xffffffff ||
108 	    __SHIFTOUT(frame.ebx, VM_REG_WORD_MASK) != VM_MAGIC)
109 		return false;
110 
111 	vmt_probe_cmd(&frame, VM_CMD_GET_SPEED);
112 	if (__SHIFTOUT(frame.eax, VM_REG_WORD_MASK) == VM_MAGIC)
113 		return false;
114 
115 	return true;
116 }
117 
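/*
 * A minimal sketch (not compiled here) of how a machine-dependent
 * front-end might glue these MI routines into autoconf(9).  The device
 * and parent names are illustrative only; the real front-ends live in
 * the architecture-specific parts of the tree.
 */
#if 0
static int
vmt_match(device_t parent, cfdata_t match, void *aux)
{

	/* only claim the device if the backdoor answers */
	return vmt_probe() ? 1 : 0;
}

static void
vmt_attach(device_t parent, device_t self, void *aux)
{
	struct vmt_softc *sc = device_private(self);

	aprint_naive("\n");
	aprint_normal(": VMware Tools driver\n");

	sc->sc_dev = self;
	vmt_common_attach(sc);
}

static int
vmt_detach(device_t self, int flags)
{
	struct vmt_softc *sc = device_private(self);

	return vmt_common_detach(sc);
}

CFATTACH_DECL_NONE(vmt, sizeof(struct vmt_softc),
    vmt_match, vmt_attach, vmt_detach, NULL);
#endif
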
118 void
119 vmt_common_attach(struct vmt_softc *sc)
120 {
121 	device_t self;
122 	struct vm_backdoor frame;
123 	int rv;
124 
125 	self = sc->sc_dev;
126 	sc->sc_log = NULL;
127 
128 	/* check again */
129 	vmt_probe_cmd(&frame, VM_CMD_GET_VERSION);
130 	if (__SHIFTOUT(frame.eax, VM_REG_WORD_MASK) == 0xffffffff ||
131 	    __SHIFTOUT(frame.ebx, VM_REG_WORD_MASK) != VM_MAGIC) {
132 		aprint_error_dev(self, "failed to get VMware version\n");
133 		return;
134 	}
135 
136 	/* show uuid */
137 	{
138 		struct uuid uuid;
139 		uint32_t u;
140 
141 		vmt_probe_cmd(&frame, VM_CMD_GET_BIOS_UUID);
142 		uuid.time_low =
143 		    bswap32(__SHIFTOUT(frame.eax, VM_REG_WORD_MASK));
144 		u = bswap32(__SHIFTOUT(frame.ebx, VM_REG_WORD_MASK));
145 		uuid.time_mid = u >> 16;
146 		uuid.time_hi_and_version = u;
147 		u = bswap32(__SHIFTOUT(frame.ecx, VM_REG_WORD_MASK));
148 		uuid.clock_seq_hi_and_reserved = u >> 24;
149 		uuid.clock_seq_low = u >> 16;
150 		uuid.node[0] = u >> 8;
151 		uuid.node[1] = u;
152 		u = bswap32(__SHIFTOUT(frame.edx, VM_REG_WORD_MASK));
153 		uuid.node[2] = u >> 24;
154 		uuid.node[3] = u >> 16;
155 		uuid.node[4] = u >> 8;
156 		uuid.node[5] = u;
157 
158 		uuid_snprintf(sc->sc_uuid, sizeof(sc->sc_uuid), &uuid);
159 		aprint_verbose_dev(sc->sc_dev, "UUID: %s\n", sc->sc_uuid);
160 	}
161 
162 	callout_init(&sc->sc_tick, 0);
163 	callout_init(&sc->sc_tclo_tick, 0);
164 	callout_init(&sc->sc_clock_sync_tick, 0);
165 
166 	sc->sc_clock_sync_period_seconds = VMT_CLOCK_SYNC_PERIOD_SECONDS;
167 
168 	rv = vmt_sysctl_setup_root(self);
169 	if (rv != 0) {
170 		aprint_error_dev(self, "failed to initialize sysctl "
171 		    "(err %d)\n", rv);
172 		goto free;
173 	}
174 
175 	sc->sc_rpc_buf = kmem_alloc(VMT_RPC_BUFLEN, KM_SLEEP);
176 
177 	if (vm_rpc_open(&sc->sc_tclo_rpc, VM_RPC_OPEN_TCLO) != 0) {
178 		aprint_error_dev(self, "failed to open backdoor RPC channel (TCLO protocol)\n");
179 		goto free;
180 	}
181 	sc->sc_tclo_rpc_open = true;
182 
183 	/* don't know if this is important at all yet */
184 	if (vm_rpc_send_rpci_tx(sc, "tools.capability.hgfs_server toolbox 1") != 0) {
185 		aprint_error_dev(self, "failed to set HGFS server capability\n");
186 		goto free;
187 	}
188 
189 	pmf_device_register1(self, NULL, NULL, vmt_shutdown);
190 
191 	sysmon_task_queue_init();
192 
193 	sc->sc_ev_power.ev_smpsw.smpsw_type = PSWITCH_TYPE_POWER;
194 	sc->sc_ev_power.ev_smpsw.smpsw_name = device_xname(self);
195 	sc->sc_ev_power.ev_code = PSWITCH_EVENT_PRESSED;
196 	sysmon_pswitch_register(&sc->sc_ev_power.ev_smpsw);
197 	sc->sc_ev_reset.ev_smpsw.smpsw_type = PSWITCH_TYPE_RESET;
198 	sc->sc_ev_reset.ev_smpsw.smpsw_name = device_xname(self);
199 	sc->sc_ev_reset.ev_code = PSWITCH_EVENT_PRESSED;
200 	sysmon_pswitch_register(&sc->sc_ev_reset.ev_smpsw);
201 	sc->sc_ev_sleep.ev_smpsw.smpsw_type = PSWITCH_TYPE_SLEEP;
202 	sc->sc_ev_sleep.ev_smpsw.smpsw_name = device_xname(self);
203 	sc->sc_ev_sleep.ev_code = PSWITCH_EVENT_RELEASED;
204 	sysmon_pswitch_register(&sc->sc_ev_sleep.ev_smpsw);
205 	sc->sc_smpsw_valid = true;
206 
207 	callout_setfunc(&sc->sc_tick, vmt_tick, sc);
208 	callout_schedule(&sc->sc_tick, hz);
209 
210 	callout_setfunc(&sc->sc_tclo_tick, vmt_tclo_tick, sc);
211 	callout_schedule(&sc->sc_tclo_tick, hz);
212 	sc->sc_tclo_ping = 1;
213 
214 	callout_setfunc(&sc->sc_clock_sync_tick, vmt_clock_sync_tick, sc);
215 	callout_schedule(&sc->sc_clock_sync_tick,
216 	    mstohz(sc->sc_clock_sync_period_seconds * 1000));
217 
218 	vmt_sync_guest_clock(sc);
219 
220 	return;
221 
222 free:
223 	if (sc->sc_rpc_buf) {
224 		kmem_free(sc->sc_rpc_buf, VMT_RPC_BUFLEN);
		sc->sc_rpc_buf = NULL;
	}
225 	pmf_device_register(self, NULL, NULL);
226 	if (sc->sc_log)
227 		sysctl_teardown(&sc->sc_log);
228 }
229 
230 int
231 vmt_common_detach(struct vmt_softc *sc)
232 {
233 	if (sc->sc_tclo_rpc_open)
234 		vm_rpc_close(&sc->sc_tclo_rpc);
235 
236 	if (sc->sc_smpsw_valid) {
237 		sysmon_pswitch_unregister(&sc->sc_ev_sleep.ev_smpsw);
238 		sysmon_pswitch_unregister(&sc->sc_ev_reset.ev_smpsw);
239 		sysmon_pswitch_unregister(&sc->sc_ev_power.ev_smpsw);
240 	}
241 
242 	callout_halt(&sc->sc_tick, NULL);
243 	callout_destroy(&sc->sc_tick);
244 
245 	callout_halt(&sc->sc_tclo_tick, NULL);
246 	callout_destroy(&sc->sc_tclo_tick);
247 
248 	callout_halt(&sc->sc_clock_sync_tick, NULL);
249 	callout_destroy(&sc->sc_clock_sync_tick);
250 
251 	if (sc->sc_rpc_buf)
252 		kmem_free(sc->sc_rpc_buf, VMT_RPC_BUFLEN);
253 
254 	if (sc->sc_log) {
255 		sysctl_teardown(&sc->sc_log);
256 		sc->sc_log = NULL;
257 	}
258 
259 	return 0;
260 }
261 
262 static int
263 vmt_sysctl_setup_root(device_t self)
264 {
265 	const struct sysctlnode *machdep_node, *vmt_node;
266 	struct vmt_softc *sc = device_private(self);
267 	int rv;
268 
269 	rv = sysctl_createv(&sc->sc_log, 0, NULL, &machdep_node,
270 	    CTLFLAG_PERMANENT, CTLTYPE_NODE, "machdep", NULL,
271 	    NULL, 0, NULL, 0, CTL_MACHDEP, CTL_EOL);
272 	if (rv != 0)
273 		goto fail;
274 
275 	rv = sysctl_createv(&sc->sc_log, 0, &machdep_node, &vmt_node,
276 	    0, CTLTYPE_NODE, device_xname(self), NULL,
277 	    NULL, 0, NULL, 0, CTL_CREATE, CTL_EOL);
278 	if (rv != 0)
279 		goto fail;
280 
281 	rv = sysctl_createv(&sc->sc_log, 0, &vmt_node, NULL,
282 	    CTLFLAG_READONLY, CTLTYPE_STRING, "uuid",
283 	    SYSCTL_DESCR("UUID of virtual machine"),
284 	    NULL, 0, sc->sc_uuid, 0,
285 	    CTL_CREATE, CTL_EOL);
	if (rv != 0)
		goto fail;

287 	rv = vmt_sysctl_setup_clock_sync(self, vmt_node);
288 	if (rv != 0)
289 		goto fail;
290 
291 	return 0;
292 
293 fail:
294 	sysctl_teardown(&sc->sc_log);
295 	sc->sc_log = NULL;
296 
297 	return rv;
298 }
299 
300 static int
301 vmt_sysctl_setup_clock_sync(device_t self, const struct sysctlnode *root_node)
302 {
303 	const struct sysctlnode *node, *period_node;
304 	struct vmt_softc *sc = device_private(self);
305 	int rv;
306 
307 	rv = sysctl_createv(&sc->sc_log, 0, &root_node, &node,
308 	    0, CTLTYPE_NODE, "clock_sync", NULL,
309 	    NULL, 0, NULL, 0, CTL_CREATE, CTL_EOL);
310 	if (rv != 0)
311 		return rv;
312 
313 	rv = sysctl_createv(&sc->sc_log, 0, &node, &period_node,
314 	    CTLFLAG_READWRITE, CTLTYPE_INT, "period",
315 	    SYSCTL_DESCR("Period, in seconds, at which to update the "
316 	        "guest's clock"),
317 	    vmt_sysctl_update_clock_sync_period, 0, (void *)sc, 0,
318 	    CTL_CREATE, CTL_EOL);
319 	return rv;
320 }
321 
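/*
 * The nodes created above surface as machdep.<device>.clock_sync.period
 * (e.g. "machdep.vmt0.clock_sync.period" for the usual first instance),
 * so the synchronisation interval can be inspected or changed at run
 * time with sysctl(8).  Setting it to 0 disables the periodic sync.
 */
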
322 static int
323 vmt_sysctl_update_clock_sync_period(SYSCTLFN_ARGS)
324 {
325 	int error, period;
326 	struct sysctlnode node;
327 	struct vmt_softc *sc;
328 
329 	node = *rnode;
330 	sc = (struct vmt_softc *)node.sysctl_data;
331 
332 	period = sc->sc_clock_sync_period_seconds;
333 	node.sysctl_data = &period;
334 	error = sysctl_lookup(SYSCTLFN_CALL(&node));
335 	if (error || newp == NULL)
336 		return error;
337 
338 	if (sc->sc_clock_sync_period_seconds != period) {
339 		callout_halt(&sc->sc_clock_sync_tick, NULL);
340 		sc->sc_clock_sync_period_seconds = period;
341 		if (sc->sc_clock_sync_period_seconds > 0)
342 			callout_schedule(&sc->sc_clock_sync_tick,
343 			    mstohz(sc->sc_clock_sync_period_seconds * 1000));
344 	}
345 	return 0;
346 }
347 
348 static void
349 vmt_clock_sync_tick(void *xarg)
350 {
351 	struct vmt_softc *sc = xarg;
352 
353 	vmt_sync_guest_clock(sc);
354 
355 	callout_schedule(&sc->sc_clock_sync_tick,
356 	    mstohz(sc->sc_clock_sync_period_seconds * 1000));
357 }
358 
359 static void
360 vmt_update_guest_uptime(struct vmt_softc *sc)
361 {
362 	/* host wants uptime in hundredths of a second */
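	/* e.g. a time_uptime of 4711 seconds goes out as the string "471100" */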
363 	if (vm_rpc_send_rpci_tx(sc, "SetGuestInfo  %d %" PRId64 "00",
364 	    VM_GUEST_INFO_UPTIME, time_uptime) != 0) {
365 		device_printf(sc->sc_dev, "unable to set guest uptime\n");
366 		sc->sc_rpc_error = 1;
367 	}
368 }
369 
370 static void
371 vmt_update_guest_info(struct vmt_softc *sc)
372 {
373 	if (strncmp(sc->sc_hostname, hostname, sizeof(sc->sc_hostname)) != 0) {
374 		strlcpy(sc->sc_hostname, hostname, sizeof(sc->sc_hostname));
375 		if (vm_rpc_send_rpci_tx(sc, "SetGuestInfo  %d %s",
376 		    VM_GUEST_INFO_DNS_NAME, sc->sc_hostname) != 0) {
377 			device_printf(sc->sc_dev, "unable to set hostname\n");
378 			sc->sc_rpc_error = 1;
379 		}
380 	}
381 
382 	/*
383 	 * we're supposed to pass the full network address information back here,
384 	 * but that involves XDR (Sun RPC) data encoding, which seems a bit unreasonable.
385 	 */
386 
387 	if (sc->sc_set_guest_os == 0) {
388 		if (vm_rpc_send_rpci_tx(sc, "SetGuestInfo  %d %s %s %s",
389 		    VM_GUEST_INFO_OS_NAME_FULL, ostype, osrelease, machine_arch) != 0) {
390 			device_printf(sc->sc_dev, "unable to set full guest OS\n");
391 			sc->sc_rpc_error = 1;
392 		}
393 
394 		/*
395 		 * host doesn't like it if we send an OS name it doesn't recognise,
396 		 * so use "other" for i386 and "other-64" for amd64
397 		 */
398 		if (vm_rpc_send_rpci_tx(sc, "SetGuestInfo  %d %s",
399 		    VM_GUEST_INFO_OS_NAME, VM_OS_NAME) != 0) {
400 			device_printf(sc->sc_dev, "unable to set guest OS\n");
401 			sc->sc_rpc_error = 1;
402 		}
403 
404 		sc->sc_set_guest_os = 1;
405 	}
406 }
407 
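/*
 * Read the host's wall-clock time via the backdoor and set the guest
 * clock from it.  On success EAX is not 0xffffffff; the seconds value
 * comes back split across ESI (high 32 bits) and EDX (low 32 bits),
 * with the sub-second part in EBX as microseconds.
 */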
408 static void
409 vmt_sync_guest_clock(struct vmt_softc *sc)
410 {
411 	struct vm_backdoor frame;
412 	struct timespec ts;
413 
414 	memset(&frame, 0, sizeof(frame));
415 	frame.eax = VM_MAGIC;
416 	frame.ecx = VM_CMD_GET_TIME_FULL;
417 	frame.edx = VM_REG_CMD(0, VM_PORT_CMD);
418 	vm_cmd(&frame);
419 
420 	if (__SHIFTOUT(frame.eax, VM_REG_WORD_MASK) != 0xffffffff) {
421 		ts.tv_sec =
422 		    ((uint64_t)__SHIFTOUT(frame.esi, VM_REG_WORD_MASK) << 32) |
423 		    __SHIFTOUT(frame.edx, VM_REG_WORD_MASK);
424 		ts.tv_nsec = __SHIFTOUT(frame.ebx, VM_REG_WORD_MASK) * 1000;
425 		tc_setclock(&ts);
426 	}
427 }
428 
429 static void
430 vmt_tick(void *xarg)
431 {
432 	struct vmt_softc *sc = xarg;
433 
434 	vmt_update_guest_info(sc);
435 	vmt_update_guest_uptime(sc);
436 
437 	callout_schedule(&sc->sc_tick, hz * 15);
438 }
439 
440 static void
441 vmt_tclo_state_change_success(struct vmt_softc *sc, int success, char state)
442 {
443 	if (vm_rpc_send_rpci_tx(sc, "tools.os.statechange.status %d %d",
444 	    success, state) != 0) {
445 		device_printf(sc->sc_dev, "unable to send state change result\n");
446 		sc->sc_rpc_error = 1;
447 	}
448 }
449 
450 static void
451 vmt_do_shutdown(struct vmt_softc *sc)
452 {
453 	vmt_tclo_state_change_success(sc, 1, VM_STATE_CHANGE_HALT);
454 	vm_rpc_send_str(&sc->sc_tclo_rpc, VM_RPC_REPLY_OK);
455 
456 	device_printf(sc->sc_dev, "host requested shutdown\n");
457 	sysmon_task_queue_sched(0, vmt_pswitch_event, &sc->sc_ev_power);
458 }
459 
460 static void
461 vmt_do_reboot(struct vmt_softc *sc)
462 {
463 	vmt_tclo_state_change_success(sc, 1, VM_STATE_CHANGE_REBOOT);
464 	vm_rpc_send_str(&sc->sc_tclo_rpc, VM_RPC_REPLY_OK);
465 
466 	device_printf(sc->sc_dev, "host requested reboot\n");
467 	sysmon_task_queue_sched(0, vmt_pswitch_event, &sc->sc_ev_reset);
468 }
469 
470 static void
471 vmt_do_resume(struct vmt_softc *sc)
472 {
473 	device_printf(sc->sc_dev, "guest resuming from suspended state\n");
474 
475 	vmt_sync_guest_clock(sc);
476 
477 	/* force guest info update */
478 	sc->sc_hostname[0] = '\0';
479 	sc->sc_set_guest_os = 0;
480 	vmt_update_guest_info(sc);
481 
482 	vmt_tclo_state_change_success(sc, 1, VM_STATE_CHANGE_RESUME);
483 	if (vm_rpc_send_str(&sc->sc_tclo_rpc, VM_RPC_REPLY_OK) != 0) {
484 		device_printf(sc->sc_dev, "error sending resume response\n");
485 		sc->sc_rpc_error = 1;
486 	}
487 
488 	sysmon_task_queue_sched(0, vmt_pswitch_event, &sc->sc_ev_sleep);
489 }
490 
491 static bool
492 vmt_shutdown(device_t self, int flags)
493 {
494 	struct vmt_softc *sc = device_private(self);
495 
496 	if (vm_rpc_send_rpci_tx(sc, "tools.capability.hgfs_server toolbox 0") != 0) {
497 		device_printf(sc->sc_dev, "failed to disable hgfs server capability\n");
498 	}
499 
500 	if (vm_rpc_send(&sc->sc_tclo_rpc, NULL, 0) != 0) {
501 		device_printf(sc->sc_dev, "failed to send shutdown ping\n");
502 	}
503 
504 	vm_rpc_close(&sc->sc_tclo_rpc);
505 
506 	return true;
507 }
508 
509 static void
510 vmt_pswitch_event(void *xarg)
511 {
512 	struct vmt_event *ev = xarg;
513 
514 	sysmon_pswitch_event(&ev->ev_smpsw, ev->ev_code);
515 }
516 
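/*
 * Poll the TCLO channel for commands from the host.  The guest sends an
 * empty RPC as a ping; the host answers with a command string such as
 * "reset", "ping", "OS_Halt", "OS_Reboot", "OS_Suspend", "OS_Resume",
 * "Capabilities_Register" or "Set_Option broadcastIP 1", and the guest
 * acknowledges each one with a status reply.  Failures set sc_rpc_error
 * so the channel is torn down and reopened on the next "reset".
 */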
517 static void
518 vmt_tclo_tick(void *xarg)
519 {
520 	struct vmt_softc *sc = xarg;
521 	uint32_t rlen;
522 	uint16_t ack;
523 
524 	/* reopen tclo channel if it's currently closed */
525 	if (sc->sc_tclo_rpc.channel == 0 &&
526 	    sc->sc_tclo_rpc.cookie1 == 0 &&
527 	    sc->sc_tclo_rpc.cookie2 == 0) {
528 		if (vm_rpc_open(&sc->sc_tclo_rpc, VM_RPC_OPEN_TCLO) != 0) {
529 			device_printf(sc->sc_dev, "unable to reopen TCLO channel\n");
530 			callout_schedule(&sc->sc_tclo_tick, hz * 15);
531 			return;
532 		}
533 
534 		if (vm_rpc_send_str(&sc->sc_tclo_rpc, VM_RPC_RESET_REPLY) != 0) {
535 			device_printf(sc->sc_dev, "failed to send reset reply\n");
536 			sc->sc_rpc_error = 1;
537 			goto out;
538 		} else {
539 			sc->sc_rpc_error = 0;
540 		}
541 	}
542 
543 	if (sc->sc_tclo_ping) {
544 		if (vm_rpc_send(&sc->sc_tclo_rpc, NULL, 0) != 0) {
545 			device_printf(sc->sc_dev, "failed to send TCLO outgoing ping\n");
546 			sc->sc_rpc_error = 1;
547 			goto out;
548 		}
549 	}
550 
551 	if (vm_rpc_get_length(&sc->sc_tclo_rpc, &rlen, &ack) != 0) {
552 		device_printf(sc->sc_dev, "failed to get length of incoming TCLO data\n");
553 		sc->sc_rpc_error = 1;
554 		goto out;
555 	}
556 
557 	if (rlen == 0) {
558 		sc->sc_tclo_ping = 1;
559 		goto out;
560 	}
561 
562 	if (rlen >= VMT_RPC_BUFLEN) {
563 		rlen = VMT_RPC_BUFLEN - 1;
564 	}
565 	if (vm_rpc_get_data(&sc->sc_tclo_rpc, sc->sc_rpc_buf, rlen, ack) != 0) {
566 		device_printf(sc->sc_dev, "failed to get incoming TCLO data\n");
567 		sc->sc_rpc_error = 1;
568 		goto out;
569 	}
570 	sc->sc_tclo_ping = 0;
571 
572 #ifdef VMT_DEBUG
573 	printf("vmware: received message '%s'\n", sc->sc_rpc_buf);
574 #endif
575 
576 	if (strcmp(sc->sc_rpc_buf, "reset") == 0) {
577 
578 		if (sc->sc_rpc_error != 0) {
579 			device_printf(sc->sc_dev, "resetting rpc\n");
580 			vm_rpc_close(&sc->sc_tclo_rpc);
581 			/* reopen and send the reset reply next time around */
582 			goto out;
583 		}
584 
585 		if (vm_rpc_send_str(&sc->sc_tclo_rpc, VM_RPC_RESET_REPLY) != 0) {
586 			device_printf(sc->sc_dev, "failed to send reset reply\n");
587 			sc->sc_rpc_error = 1;
588 		}
589 
590 	} else if (strcmp(sc->sc_rpc_buf, "ping") == 0) {
591 
592 		vmt_update_guest_info(sc);
593 		if (vm_rpc_send_str(&sc->sc_tclo_rpc, VM_RPC_REPLY_OK) != 0) {
594 			device_printf(sc->sc_dev, "error sending ping response\n");
595 			sc->sc_rpc_error = 1;
596 		}
597 
598 	} else if (strcmp(sc->sc_rpc_buf, "OS_Halt") == 0) {
599 		vmt_do_shutdown(sc);
600 	} else if (strcmp(sc->sc_rpc_buf, "OS_Reboot") == 0) {
601 		vmt_do_reboot(sc);
602 	} else if (strcmp(sc->sc_rpc_buf, "OS_PowerOn") == 0) {
603 		vmt_tclo_state_change_success(sc, 1, VM_STATE_CHANGE_POWERON);
604 		if (vm_rpc_send_str(&sc->sc_tclo_rpc, VM_RPC_REPLY_OK) != 0) {
605 			device_printf(sc->sc_dev, "error sending poweron response\n");
606 			sc->sc_rpc_error = 1;
607 		}
608 	} else if (strcmp(sc->sc_rpc_buf, "OS_Suspend") == 0) {
609 		log(LOG_KERN | LOG_NOTICE, "VMware guest entering suspended state\n");
610 
611 		vmt_tclo_state_change_success(sc, 1, VM_STATE_CHANGE_SUSPEND);
612 		if (vm_rpc_send_str(&sc->sc_tclo_rpc, VM_RPC_REPLY_OK) != 0) {
613 			device_printf(sc->sc_dev, "error sending suspend response\n");
614 			sc->sc_rpc_error = 1;
615 		}
616 	} else if (strcmp(sc->sc_rpc_buf, "OS_Resume") == 0) {
617 		vmt_do_resume(sc);
618 	} else if (strcmp(sc->sc_rpc_buf, "Capabilities_Register") == 0) {
619 
620 		/* don't know if this is important at all */
621 		if (vm_rpc_send_rpci_tx(sc, "vmx.capability.unified_loop toolbox") != 0) {
622 			device_printf(sc->sc_dev, "unable to set unified loop\n");
623 			sc->sc_rpc_error = 1;
624 		}
625 		if (vm_rpci_response_successful(sc) == 0) {
626 			device_printf(sc->sc_dev, "host rejected unified loop setting\n");
627 		}
628 
629 		/* the trailing space is apparently important here */
630 		if (vm_rpc_send_rpci_tx(sc, "tools.capability.statechange ") != 0) {
631 			device_printf(sc->sc_dev, "unable to send statechange capability\n");
632 			sc->sc_rpc_error = 1;
633 		}
634 		if (vm_rpci_response_successful(sc) == 0) {
635 			device_printf(sc->sc_dev, "host rejected statechange capability\n");
636 		}
637 
638 		if (vm_rpc_send_rpci_tx(sc, "tools.set.version %u", VM_VERSION_UNMANAGED) != 0) {
639 			device_printf(sc->sc_dev, "unable to set tools version\n");
640 			sc->sc_rpc_error = 1;
641 		}
642 
643 		vmt_update_guest_uptime(sc);
644 
645 		if (vm_rpc_send_str(&sc->sc_tclo_rpc, VM_RPC_REPLY_OK) != 0) {
646 			device_printf(sc->sc_dev, "error sending capabilities_register response\n");
647 			sc->sc_rpc_error = 1;
648 		}
649 	} else if (strcmp(sc->sc_rpc_buf, "Set_Option broadcastIP 1") == 0) {
650 		struct ifaddr *iface_addr = NULL;
651 		struct ifnet *iface;
652 		struct sockaddr_in *guest_ip;
653 		int s;
654 		struct psref psref;
655 
656 		/* find first available ipv4 address */
657 		guest_ip = NULL;
658 		s = pserialize_read_enter();
659 		IFNET_READER_FOREACH(iface) {
660 
661 			/* skip loopback */
662 			if (strncmp(iface->if_xname, "lo", 2) == 0 &&
663 			    iface->if_xname[2] >= '0' && iface->if_xname[2] <= '9') {
664 				continue;
665 			}
666 
667 			IFADDR_READER_FOREACH(iface_addr, iface) {
668 				if (iface_addr->ifa_addr->sa_family != AF_INET) {
669 					continue;
670 				}
671 
672 				guest_ip = satosin(iface_addr->ifa_addr);
673 				ifa_acquire(iface_addr, &psref);
674 				goto got;
675 			}
676 		}
677 	got:
678 		pserialize_read_exit(s);
679 
680 		if (guest_ip != NULL) {
681 			if (vm_rpc_send_rpci_tx(sc, "info-set guestinfo.ip %s",
682 			    inet_ntoa(guest_ip->sin_addr)) != 0) {
683 				device_printf(sc->sc_dev, "unable to send guest IP address\n");
684 				sc->sc_rpc_error = 1;
685 			}
686 			ifa_release(iface_addr, &psref);
687 
688 			if (vm_rpc_send_str(&sc->sc_tclo_rpc, VM_RPC_REPLY_OK) != 0) {
689 				device_printf(sc->sc_dev, "error sending broadcastIP response\n");
690 				sc->sc_rpc_error = 1;
691 			}
692 		} else {
693 			if (vm_rpc_send_str(&sc->sc_tclo_rpc, VM_RPC_REPLY_ERROR_IP_ADDR) != 0) {
694 				device_printf(sc->sc_dev,
695 				    "error sending broadcastIP error response\n");
696 				sc->sc_rpc_error = 1;
697 			}
698 		}
699 	} else {
700 		if (vm_rpc_send_str(&sc->sc_tclo_rpc, VM_RPC_REPLY_ERROR) != 0) {
701 			device_printf(sc->sc_dev, "error sending unknown command reply\n");
702 			sc->sc_rpc_error = 1;
703 		}
704 	}
705 
706 out:
707 	callout_schedule(&sc->sc_tclo_tick, sc->sc_tclo_ping ? hz : 1);
708 }
709 
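/*
 * Thin wrappers around the machine-dependent BACKDOOR_OP() primitive:
 * vm_cmd() issues a single command/response exchange, while vm_ins()
 * and vm_outs() perform the bulk "enhanced RPC" transfers used to move
 * message data into and out of the guest.
 */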
710 static void
711 vm_cmd(struct vm_backdoor *frame)
712 {
713 	BACKDOOR_OP(BACKDOOR_OP_CMD, frame);
714 }
715 
716 static void
717 vm_ins(struct vm_backdoor *frame)
718 {
719 	BACKDOOR_OP(BACKDOOR_OP_IN, frame);
720 }
721 
722 static void
723 vm_outs(struct vm_backdoor *frame)
724 {
725 	BACKDOOR_OP(BACKDOOR_OP_OUT, frame);
726 }
727 
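/*
 * Open an RPC channel of the given protocol (TCLO or RPCI).  On success
 * the host hands back a channel number and a pair of cookies, which
 * must accompany every subsequent operation on that channel.
 */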
728 static int
729 vm_rpc_open(struct vm_rpc *rpc, uint32_t proto)
730 {
731 	struct vm_backdoor frame;
732 
733 	memset(&frame, 0, sizeof(frame));
734 	frame.eax = VM_MAGIC;
735 	frame.ebx = proto | VM_RPC_FLAG_COOKIE;
736 	frame.ecx = VM_REG_CMD_RPC(VM_RPC_OPEN);
737 	frame.edx = VM_REG_PORT_CMD(0);
738 
739 	vm_cmd(&frame);
740 
741 	if (__SHIFTOUT(frame.ecx, VM_REG_HIGH_MASK) != 1 ||
742 	    __SHIFTOUT(frame.edx, VM_REG_LOW_MASK) != 0) {
743 		/* open-vm-tools retries without VM_RPC_FLAG_COOKIE here... */
744 		printf("vmware: open failed, eax=%#"PRIxREGISTER
745 		    ", ecx=%#"PRIxREGISTER", edx=%#"PRIxREGISTER"\n",
746 		    frame.eax, frame.ecx, frame.edx);
747 		return EIO;
748 	}
749 
750 	rpc->channel = __SHIFTOUT(frame.edx, VM_REG_HIGH_MASK);
751 	rpc->cookie1 = __SHIFTOUT(frame.esi, VM_REG_WORD_MASK);
752 	rpc->cookie2 = __SHIFTOUT(frame.edi, VM_REG_WORD_MASK);
753 
754 	return 0;
755 }
756 
757 static int
758 vm_rpc_close(struct vm_rpc *rpc)
759 {
760 	struct vm_backdoor frame;
761 
762 	memset(&frame, 0, sizeof(frame));
763 	frame.eax = VM_MAGIC;
764 	frame.ebx = 0;
765 	frame.ecx = VM_REG_CMD_RPC(VM_RPC_CLOSE);
766 	frame.edx = VM_REG_PORT_CMD(rpc->channel);
767 	frame.edi = rpc->cookie2;
768 	frame.esi = rpc->cookie1;
769 
770 	vm_cmd(&frame);
771 
772 	if (__SHIFTOUT(frame.ecx, VM_REG_HIGH_MASK) == 0 ||
773 	    __SHIFTOUT(frame.ecx, VM_REG_LOW_MASK) != 0) {
774 		printf("vmware: close failed, "
775 		    "eax=%#"PRIxREGISTER", ecx=%#"PRIxREGISTER"\n",
776 		    frame.eax, frame.ecx);
777 		return EIO;
778 	}
779 
780 	rpc->channel = 0;
781 	rpc->cookie1 = 0;
782 	rpc->cookie2 = 0;
783 
784 	return 0;
785 }
786 
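/*
 * Send a message on an open channel.  The length is announced first
 * with a conventional backdoor command; the payload itself then goes
 * out through the bulk "enhanced RPC" port via vm_outs().  A zero
 * length message acts as a ping and needs only the first step.
 */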
787 static int
788 vm_rpc_send(const struct vm_rpc *rpc, const uint8_t *buf, uint32_t length)
789 {
790 	struct vm_backdoor frame;
791 
792 	/* Send the length of the command. */
793 	memset(&frame, 0, sizeof(frame));
794 	frame.eax = VM_MAGIC;
795 	frame.ebx = length;
796 	frame.ecx = VM_REG_CMD_RPC(VM_RPC_SET_LENGTH);
797 	frame.edx = VM_REG_PORT_CMD(rpc->channel);
798 	frame.esi = rpc->cookie1;
799 	frame.edi = rpc->cookie2;
800 
801 	vm_cmd(&frame);
802 
803 	if ((__SHIFTOUT(frame.ecx, VM_REG_HIGH_MASK) & VM_RPC_REPLY_SUCCESS) ==
804 	    0) {
805 		printf("vmware: sending length failed, "
806 		    "eax=%#"PRIxREGISTER", ecx=%#"PRIxREGISTER"\n",
807 		    frame.eax, frame.ecx);
808 		return EIO;
809 	}
810 
811 	if (length == 0)
812 		return 0; /* Only need to poke once if command is null. */
813 
814 	/* Send the command using enhanced RPC. */
815 	memset(&frame, 0, sizeof(frame));
816 	frame.eax = VM_MAGIC;
817 	frame.ebx = VM_RPC_ENH_DATA;
818 	frame.ecx = length;
819 	frame.edx = VM_REG_PORT_RPC(rpc->channel);
820 	frame.ebp = rpc->cookie1;
821 	frame.edi = rpc->cookie2;
822 	frame.esi = (register_t)buf;
823 
824 	vm_outs(&frame);
825 
826 	if (__SHIFTOUT(frame.ebx, VM_REG_WORD_MASK) != VM_RPC_ENH_DATA) {
827 		/* open-vm-tools retries on VM_RPC_REPLY_CHECKPOINT */
828 		printf("vmware: send failed, ebx=%#"PRIxREGISTER"\n",
829 		    frame.ebx);
830 		return EIO;
831 	}
832 
833 	return 0;
834 }
835 
836 static int
837 vm_rpc_send_str(const struct vm_rpc *rpc, const uint8_t *str)
838 {
839 	return vm_rpc_send(rpc, str, strlen(str));
840 }
841 
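/*
 * Fetch a pending message with the bulk "enhanced RPC" transfer and
 * NUL-terminate it, then acknowledge it with a GET_END command carrying
 * the data id previously returned by vm_rpc_get_length().  The caller
 * must provide a buffer with room for length + 1 bytes.
 */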
842 static int
843 vm_rpc_get_data(const struct vm_rpc *rpc, char *data, uint32_t length,
844     uint16_t dataid)
845 {
846 	struct vm_backdoor frame;
847 
848 	/* Get data using enhanced RPC. */
849 	memset(&frame, 0, sizeof(frame));
850 	frame.eax = VM_MAGIC;
851 	frame.ebx = VM_RPC_ENH_DATA;
852 	frame.ecx = length;
853 	frame.edx = VM_REG_PORT_RPC(rpc->channel);
854 	frame.esi = rpc->cookie1;
855 	frame.edi = (register_t)data;
856 	frame.ebp = rpc->cookie2;
857 
858 	vm_ins(&frame);
859 
860 	/* NUL-terminate the data */
861 	data[length] = '\0';
862 
863 	if (__SHIFTOUT(frame.ebx, VM_REG_WORD_MASK) != VM_RPC_ENH_DATA) {
864 		printf("vmware: get data failed, ebx=%#"PRIxREGISTER"\n",
865 		    frame.ebx);
866 		return EIO;
867 	}
868 
869 	/* Acknowledge data received. */
870 	memset(&frame, 0, sizeof(frame));
871 	frame.eax = VM_MAGIC;
872 	frame.ebx = dataid;
873 	frame.ecx = VM_REG_CMD_RPC(VM_RPC_GET_END);
874 	frame.edx = VM_REG_PORT_CMD(rpc->channel);
875 	frame.esi = rpc->cookie1;
876 	frame.edi = rpc->cookie2;
877 
878 	vm_cmd(&frame);
879 
880 	if (__SHIFTOUT(frame.ecx, VM_REG_HIGH_MASK) == 0) {
881 		printf("vmware: ack data failed, "
882 		    "eax=%#"PRIxREGISTER", ecx=%#"PRIxREGISTER"\n",
883 		    frame.eax, frame.ecx);
884 		return EIO;
885 	}
886 
887 	return 0;
888 }
889 
890 static int
891 vm_rpc_get_length(const struct vm_rpc *rpc, uint32_t *length, uint16_t *dataid)
892 {
893 	struct vm_backdoor frame;
894 
895 	memset(&frame, 0, sizeof(frame));
896 	frame.eax = VM_MAGIC;
897 	frame.ebx = 0;
898 	frame.ecx = VM_REG_CMD_RPC(VM_RPC_GET_LENGTH);
899 	frame.edx = VM_REG_PORT_CMD(rpc->channel);
900 	frame.esi = rpc->cookie1;
901 	frame.edi = rpc->cookie2;
902 
903 	vm_cmd(&frame);
904 
905 	if ((__SHIFTOUT(frame.ecx, VM_REG_HIGH_MASK) & VM_RPC_REPLY_SUCCESS) ==
906 	    0) {
907 		printf("vmware: get length failed, "
908 		    "eax=%#"PRIxREGISTER", ecx=%#"PRIxREGISTER"\n",
909 		    frame.eax, frame.ecx);
910 		return EIO;
911 	}
912 	if ((__SHIFTOUT(frame.ecx, VM_REG_HIGH_MASK) & VM_RPC_REPLY_DORECV) ==
913 	    0) {
914 		*length = 0;
915 		*dataid = 0;
916 	} else {
917 		*length = __SHIFTOUT(frame.ebx, VM_REG_WORD_MASK);
918 		*dataid = __SHIFTOUT(frame.edx, VM_REG_HIGH_MASK);
919 	}
920 
921 	return 0;
922 }
923 
924 static int
925 vm_rpci_response_successful(struct vmt_softc *sc)
926 {
927 	return (sc->sc_rpc_buf[0] == '1' && sc->sc_rpc_buf[1] == ' ');
928 }
929 
930 static int
931 vm_rpc_send_rpci_tx_buf(struct vmt_softc *sc, const uint8_t *buf, uint32_t length)
932 {
933 	struct vm_rpc rpci;
934 	uint32_t rlen;
935 	uint16_t ack;
936 	int result = 0;
937 
938 	if (vm_rpc_open(&rpci, VM_RPC_OPEN_RPCI) != 0) {
939 		device_printf(sc->sc_dev, "rpci channel open failed\n");
940 		return EIO;
941 	}
942 
943 	if (vm_rpc_send(&rpci, buf, length) != 0) {
944 		device_printf(sc->sc_dev, "unable to send rpci command\n");
945 		result = EIO;
946 		goto out;
947 	}
948 
949 	if (vm_rpc_get_length(&rpci, &rlen, &ack) != 0) {
950 		device_printf(sc->sc_dev, "failed to get length of rpci response data\n");
951 		result = EIO;
952 		goto out;
953 	}
954 
955 	if (rlen > 0) {
956 		if (rlen >= VMT_RPC_BUFLEN) {
957 			rlen = VMT_RPC_BUFLEN - 1;
958 		}
959 
960 		if (vm_rpc_get_data(&rpci, sc->sc_rpc_buf, rlen, ack) != 0) {
961 			device_printf(sc->sc_dev, "failed to get rpci response data\n");
962 			result = EIO;
963 			goto out;
964 		}
965 	}
966 
967 out:
968 	if (vm_rpc_close(&rpci) != 0) {
969 		device_printf(sc->sc_dev, "unable to close rpci channel\n");
970 	}
971 
972 	return result;
973 }
974 
975 static int
976 vm_rpc_send_rpci_tx(struct vmt_softc *sc, const char *fmt, ...)
977 {
978 	va_list args;
979 	int len;
980 
981 	va_start(args, fmt);
982 	len = vsnprintf(sc->sc_rpc_buf, VMT_RPC_BUFLEN, fmt, args);
983 	va_end(args);
984 
985 	if (len >= VMT_RPC_BUFLEN) {
986 		device_printf(sc->sc_dev, "rpci command didn't fit in buffer\n");
987 		return EIO;
988 	}
989 
990 	return vm_rpc_send_rpci_tx_buf(sc, sc->sc_rpc_buf, len);
991 }
992 
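/*
 * Example (not compiled) of how an outgoing RPCI command is issued and
 * its response checked.  The "log" command used here is only assumed to
 * be understood by the host; it is not part of this driver.
 */
#if 0
static void
vmt_example_rpci(struct vmt_softc *sc)
{
	/* format the command and send it over a temporary RPCI channel */
	if (vm_rpc_send_rpci_tx(sc, "log %s: example message",
	    device_xname(sc->sc_dev)) != 0) {
		device_printf(sc->sc_dev, "example rpci command failed\n");
		return;
	}

	/* the host's reply, left in sc_rpc_buf, starts with "1 " on success */
	if (vm_rpci_response_successful(sc) == 0)
		device_printf(sc->sc_dev, "host rejected example command\n");
}
#endif
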
993 #if 0
994 	struct vm_backdoor frame;
995 
996 	memset(&frame, 0, sizeof(frame));
997 
998 	frame.eax = VM_MAGIC;
999 	frame.ecx = VM_CMD_GET_VERSION;
1000 	frame.edx = VM_PORT_CMD;
1001 
1002 	printf("\n");
1003 	printf("eax %#"PRIxREGISTER"\n", frame.eax);
1004 	printf("ebx %#"PRIxREGISTER"\n", frame.ebx);
1005 	printf("ecx %#"PRIxREGISTER"\n", frame.ecx);
1006 	printf("edx %#"PRIxREGISTER"\n", frame.edx);
1007 	printf("ebp %#"PRIxREGISTER"\n", frame.ebp);
1008 	printf("edi %#"PRIxREGISTER"\n", frame.edi);
1009 	printf("esi %#"PRIxREGISTER"\n", frame.esi);
1010 
1011 	vm_cmd(&frame);
1012 
1013 	printf("-\n");
1014 	printf("eax %#"PRIxREGISTER"\n", frame.eax);
1015 	printf("ebx %#"PRIxREGISTER"\n", frame.ebx);
1016 	printf("ecx %#"PRIxREGISTER"\n", frame.ecx);
1017 	printf("edx %#"PRIxREGISTER"\n", frame.edx);
1018 	printf("ebp %#"PRIxREGISTER"\n", frame.ebp);
1019 	printf("edi %#"PRIxREGISTER"\n", frame.edi);
1020 	printf("esi %#"PRIxREGISTER"\n", frame.esi);
1021 #endif
1022 
1023 /*
1024  * Notes on tracing backdoor activity in vmware-guestd:
1025  *
1026  * - Find the addresses of the inl / rep insb / rep outsb
1027  *   instructions used to perform backdoor operations.
1028  *   One way to do this is to disassemble vmware-guestd:
1029  *
1030  *   $ objdump -S /emul/freebsd/sbin/vmware-guestd > vmware-guestd.S
1031  *
1032  *   and search for '<tab>in ' in the resulting file.  The rep insb and
1033  *   rep outsb code is directly below that.
1034  *
1035  * - Run vmware-guestd under gdb, setting up breakpoints as follows:
1036  *   (the addresses shown here are the ones from VMware-server-1.0.10-203137,
1037  *   the last version that actually works in FreeBSD emulation on OpenBSD)
1038  *
1039  * break *0x805497b   (address of 'in' instruction)
1040  * commands 1
1041  * silent
1042  * echo INOUT\n
1043  * print/x $ecx
1044  * print/x $ebx
1045  * print/x $edx
1046  * continue
1047  * end
1048  * break *0x805497c   (address of instruction after 'in')
1049  * commands 2
1050  * silent
1051  * echo ===\n
1052  * print/x $ecx
1053  * print/x $ebx
1054  * print/x $edx
1055  * echo \n
1056  * continue
1057  * end
1058  * break *0x80549b7   (address of instruction before 'rep insb')
1059  * commands 3
1060  * silent
1061  * set variable $inaddr = $edi
1062  * set variable $incount = $ecx
1063  * continue
1064  * end
1065  * break *0x80549ba   (address of instruction after 'rep insb')
1066  * commands 4
1067  * silent
1068  * echo IN\n
1069  * print $incount
1070  * x/s $inaddr
1071  * echo \n
1072  * continue
1073  * end
1074  * break *0x80549fb    (address of instruction before 'rep outsb')
1075  * commands 5
1076  * silent
1077  * echo OUT\n
1078  * print $ecx
1079  * x/s $esi
1080  * echo \n
1081  * continue
1082  * end
1083  *
1084  * This will produce a log of the backdoor operations, including the
1085  * data sent and received and the relevant register values.  You can then
1086  * match the register values to the various constants in this file.
1087  */
1088