xref: /openbsd-src/sys/dev/pv/vmt.c (revision fc405d53b73a2d73393cb97f684863d17b583e38)
1 /*	$OpenBSD: vmt.c,v 1.30 2023/01/07 06:40:21 asou Exp $ */
2 
3 /*
4  * Copyright (c) 2007 David Crawshaw <david@zentus.com>
5  * Copyright (c) 2008 David Gwynne <dlg@openbsd.org>
6  *
7  * Permission to use, copy, modify, and distribute this software for any
8  * purpose with or without fee is hereby granted, provided that the above
9  * copyright notice and this permission notice appear in all copies.
10  *
11  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
12  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
13  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
14  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
15  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
16  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
17  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
18  */
19 
20 #if !defined(__i386__) && !defined(__amd64__)
21 #error vmt(4) is only supported on i386 and amd64
22 #endif
23 
24 /*
25  * Protocol reverse engineered by Ken Kato:
26  * https://sites.google.com/site/chitchatvmback/backdoor
27  */
28 
29 #include <sys/param.h>
30 #include <sys/systm.h>
31 #include <sys/kernel.h>
32 #include <sys/malloc.h>
33 #include <sys/timeout.h>
34 #include <sys/signalvar.h>
35 #include <sys/syslog.h>
36 #include <sys/proc.h>
37 #include <sys/socket.h>
38 #include <sys/ioctl.h>
39 #include <sys/mount.h>
40 #include <sys/task.h>
41 #include <sys/sensors.h>
42 
43 #include <net/if.h>
44 #include <net/if_dl.h>
45 #include <net/if_var.h>
46 #include <net/if_types.h>
47 #include <net/rtable.h>
48 #include <netinet/in.h>
49 #include <netinet/if_ether.h>
50 
51 #include <dev/pv/pvvar.h>
52 
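/*
 * Backdoor basics: the guest loads VM_MAGIC into EAX, a command into
 * ECX and a port number into EDX, then executes an I/O instruction on
 * that port (see vm_cmd/vm_ins/vm_outs below).  The hypervisor
 * intercepts the access and returns its results in the same register
 * frame.
 */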
53 /* "The" magic number, always occupies the EAX register. */
54 #define VM_MAGIC			0x564D5868
55 
56 /* Port numbers, passed on EDX.LOW. */
57 #define VM_PORT_CMD			0x5658
58 #define VM_PORT_RPC			0x5659
59 
60 /* Commands, passed on ECX.LOW. */
61 #define VM_CMD_GET_SPEED		0x01
62 #define VM_CMD_APM			0x02
63 #define VM_CMD_GET_MOUSEPOS		0x04
64 #define VM_CMD_SET_MOUSEPOS		0x05
65 #define VM_CMD_GET_CLIPBOARD_LEN	0x06
66 #define VM_CMD_GET_CLIPBOARD		0x07
67 #define VM_CMD_SET_CLIPBOARD_LEN	0x08
68 #define VM_CMD_SET_CLIPBOARD		0x09
69 #define VM_CMD_GET_VERSION		0x0a
70 #define  VM_VERSION_UNMANAGED			0x7fffffff
71 #define VM_CMD_GET_DEVINFO		0x0b
72 #define VM_CMD_DEV_ADDREMOVE		0x0c
73 #define VM_CMD_GET_GUI_OPTIONS		0x0d
74 #define VM_CMD_SET_GUI_OPTIONS		0x0e
75 #define VM_CMD_GET_SCREEN_SIZE		0x0f
76 #define VM_CMD_GET_HWVER		0x11
77 #define VM_CMD_POPUP_OSNOTFOUND		0x12
78 #define VM_CMD_GET_BIOS_UUID		0x13
79 #define VM_CMD_GET_MEM_SIZE		0x14
80 /*#define VM_CMD_GET_TIME		0x17 */	/* deprecated */
81 #define VM_CMD_RPC			0x1e
82 #define VM_CMD_GET_TIME_FULL		0x2e
83 
84 /* RPC sub-commands, passed on ECX.HIGH. */
85 #define VM_RPC_OPEN			0x00
86 #define VM_RPC_SET_LENGTH		0x01
87 #define VM_RPC_SET_DATA			0x02
88 #define VM_RPC_GET_LENGTH		0x03
89 #define VM_RPC_GET_DATA			0x04
90 #define VM_RPC_GET_END			0x05
91 #define VM_RPC_CLOSE			0x06
92 
93 /* RPC magic numbers, passed on EBX. */
94 #define VM_RPC_OPEN_RPCI	0x49435052UL /* with VM_RPC_OPEN. */
95 #define VM_RPC_OPEN_TCLO	0x4F4C4354UL /* with VM_RPC_OPEN. */
96 #define VM_RPC_ENH_DATA		0x00010000UL /* with enhanced RPC data calls. */
97 
98 #define VM_RPC_FLAG_COOKIE	0x80000000UL
99 
100 /* RPC reply flags */
101 #define VM_RPC_REPLY_SUCCESS	0x0001
102 #define VM_RPC_REPLY_DORECV	0x0002		/* incoming message available */
103 #define VM_RPC_REPLY_CLOSED	0x0004		/* RPC channel is closed */
104 #define VM_RPC_REPLY_UNSENT	0x0008		/* incoming message was removed? */
105 #define VM_RPC_REPLY_CHECKPOINT	0x0010		/* checkpoint occurred -> retry */
106 #define VM_RPC_REPLY_POWEROFF	0x0020		/* underlying device is powering off */
107 #define VM_RPC_REPLY_TIMEOUT	0x0040
108 #define VM_RPC_REPLY_HB		0x0080		/* high-bandwidth tx/rx available */
109 
110 /* VM state change IDs */
111 #define VM_STATE_CHANGE_HALT	1
112 #define VM_STATE_CHANGE_REBOOT	2
113 #define VM_STATE_CHANGE_POWERON 3
114 #define VM_STATE_CHANGE_RESUME  4
115 #define VM_STATE_CHANGE_SUSPEND 5
116 
117 /* VM guest info keys */
118 #define VM_GUEST_INFO_DNS_NAME		1
119 #define VM_GUEST_INFO_IP_ADDRESS	2
120 #define VM_GUEST_INFO_DISK_FREE_SPACE	3
121 #define VM_GUEST_INFO_BUILD_NUMBER	4
122 #define VM_GUEST_INFO_OS_NAME_FULL	5
123 #define VM_GUEST_INFO_OS_NAME		6
124 #define VM_GUEST_INFO_UPTIME		7
125 #define VM_GUEST_INFO_MEMORY		8
126 #define VM_GUEST_INFO_IP_ADDRESS_V2	9
127 #define VM_GUEST_INFO_IP_ADDRESS_V3	10
128 
129 /* RPC responses */
130 #define VM_RPC_REPLY_OK			"OK "
131 #define VM_RPC_RESET_REPLY		"OK ATR toolbox"
132 #define VM_RPC_REPLY_ERROR		"ERROR Unknown command"
133 #define VM_RPC_REPLY_ERROR_IP_ADDR	"ERROR Unable to find guest IP address"
134 
135 /* VM backup error codes */
136 #define VM_BACKUP_SUCCESS		0
137 #define VM_BACKUP_SYNC_ERROR		3
138 #define VM_BACKUP_REMOTE_ABORT		4
139 
140 #define VM_BACKUP_TIMEOUT		30 /* seconds */
141 
142 /* NIC/IP address stuff */
143 #define VM_NICINFO_VERSION		3
144 
145 #define VM_NICINFO_IP_LEN		64
146 #define VM_NICINFO_MAX_NICS		16
147 #define VM_NICINFO_MAX_ADDRS		2048
148 #define VM_NICINFO_MAC_LEN		20
149 
150 #define VM_NICINFO_ADDR_IPV4		1
151 #define VM_NICINFO_ADDR_IPV6		2
152 
153 struct vm_nicinfo_addr_v4 {
154 	uint32_t	v4_addr_type;
155 	uint32_t	v4_addr_len;
156 	struct in_addr	v4_addr;
157 	uint32_t	v4_prefix_len;
158 	uint32_t	v4_origin;
159 	uint32_t	v4_status;
160 };
161 
162 struct vm_nicinfo_addr_v6 {
163 	uint32_t	v6_addr_type;
164 	uint32_t	v6_addr_len;
165 	struct in6_addr v6_addr;
166 	uint32_t	v6_prefix_len;
167 	uint32_t	v6_origin;
168 	uint32_t	v6_status;
169 };
170 
171 struct vm_nicinfo_nic {
172 	uint32_t	ni_mac_len;
173 	char		ni_mac[VM_NICINFO_MAC_LEN];
174 	uint32_t	ni_num_addrs;
175 };
176 
177 struct vm_nicinfo_nic_nomac {
178 	uint32_t	nn_mac_len;
179 	uint32_t	nn_num_addrs;
180 };
181 
182 struct vm_nicinfo_nic_post {
183 	uint32_t	np_dns_config;
184 	uint32_t	np_wins_config;
185 	uint32_t	np_dhcpv4_config;
186 	uint32_t	np_dhcpv6_config;
187 };
188 
189 struct vm_nicinfo_nic_list {
190 	uint32_t	nl_version;
191 	uint32_t	nl_nic_list;
192 	uint32_t	nl_num_nics;
193 };
194 
195 struct vm_nicinfo_nic_list_post {
196 	uint32_t	nl_num_routes;
197 	uint32_t	nl_dns_config;
198 	uint32_t	nl_wins_config;
199 	uint32_t	nl_dhcpv4_config;
200 	uint32_t	nl_dhcpv6_config;
201 };
202 
203 #define VM_NICINFO_CMD			"SetGuestInfo  10 "
204 
205 /* A register. */
206 union vm_reg {
207 	struct {
208 		uint16_t low;
209 		uint16_t high;
210 	} part;
211 	uint32_t word;
212 #ifdef __amd64__
213 	struct {
214 		uint32_t low;
215 		uint32_t high;
216 	} words;
217 	uint64_t quad;
218 #endif
219 } __packed;
220 
221 /* A register frame. */
222 struct vm_backdoor {
223 	union vm_reg eax;
224 	union vm_reg ebx;
225 	union vm_reg ecx;
226 	union vm_reg edx;
227 	union vm_reg esi;
228 	union vm_reg edi;
229 	union vm_reg ebp;
230 } __packed;
231 
232 /* RPC context. */
233 struct vm_rpc {
234 	uint16_t channel;
235 	uint32_t cookie1;
236 	uint32_t cookie2;
237 };
238 
239 struct vmt_softc {
240 	struct device		sc_dev;
241 
242 	struct vm_rpc		sc_tclo_rpc;
243 	char			*sc_rpc_buf;
244 	int			sc_rpc_error;
245 	int			sc_tclo_ping;
246 	int			sc_set_guest_os;
247 	int			sc_quiesce;
248 	struct task		sc_quiesce_task;
249 	struct task		sc_nicinfo_task;
250 #define VMT_RPC_BUFLEN		4096
251 
252 	struct timeout		sc_tick;
253 	struct timeout		sc_tclo_tick;
254 	struct ksensordev	sc_sensordev;
255 	struct ksensor		sc_sensor;
256 
257 	char			sc_hostname[MAXHOSTNAMELEN];
258 	size_t			sc_nic_info_size;
259 	char			*sc_nic_info;
260 };
261 
262 #ifdef VMT_DEBUG
263 #define DPRINTF(_arg...)	printf(_arg)
264 #else
265 #define DPRINTF(_arg...)	do {} while(0)
266 #endif
267 #define DEVNAME(_s)		((_s)->sc_dev.dv_xname)
268 
269 void	 vm_cmd(struct vm_backdoor *);
270 void	 vm_ins(struct vm_backdoor *);
271 void	 vm_outs(struct vm_backdoor *);
272 
273 /* Functions for communicating with the VM Host. */
274 int	 vm_rpc_open(struct vm_rpc *, uint32_t);
275 int	 vm_rpc_close(struct vm_rpc *);
276 int	 vm_rpc_send(const struct vm_rpc *, const uint8_t *, uint32_t);
277 int	 vm_rpc_send_str(const struct vm_rpc *, const uint8_t *);
278 int	 vm_rpc_get_length(const struct vm_rpc *, uint32_t *, uint16_t *);
279 int	 vm_rpc_get_data(const struct vm_rpc *, char *, uint32_t, uint16_t);
280 int	 vm_rpc_send_rpci_tx_buf(struct vmt_softc *, const uint8_t *, uint32_t);
281 int	 vm_rpc_send_rpci_tx(struct vmt_softc *, const char *, ...)
282 	    __attribute__((__format__(__kprintf__,2,3)));
283 int	 vm_rpci_response_successful(struct vmt_softc *);
284 
285 int	 vmt_kvop(void *, int, char *, char *, size_t);
286 
287 void	 vmt_probe_cmd(struct vm_backdoor *, uint16_t);
288 void	 vmt_tclo_state_change_success(struct vmt_softc *, int, char);
289 void	 vmt_do_reboot(struct vmt_softc *);
290 void	 vmt_do_shutdown(struct vmt_softc *);
291 void	 vmt_shutdown(void *);
292 
293 void	 vmt_clear_guest_info(struct vmt_softc *);
294 void	 vmt_update_guest_info(struct vmt_softc *);
295 void	 vmt_update_guest_uptime(struct vmt_softc *);
296 
297 void	 vmt_tick_hook(struct device *self);
298 void	 vmt_tick(void *);
299 void	 vmt_resume(void);
300 
301 int	 vmt_match(struct device *, void *, void *);
302 void	 vmt_attach(struct device *, struct device *, void *);
303 int	 vmt_activate(struct device *, int);
304 
305 void	 vmt_tclo_tick(void *);
306 int	 vmt_tclo_process(struct vmt_softc *, const char *);
307 void	 vmt_tclo_reset(struct vmt_softc *);
308 void	 vmt_tclo_ping(struct vmt_softc *);
309 void	 vmt_tclo_halt(struct vmt_softc *);
310 void	 vmt_tclo_reboot(struct vmt_softc *);
311 void	 vmt_tclo_poweron(struct vmt_softc *);
312 void	 vmt_tclo_suspend(struct vmt_softc *);
313 void	 vmt_tclo_resume(struct vmt_softc *);
314 void	 vmt_tclo_capreg(struct vmt_softc *);
315 void	 vmt_tclo_broadcastip(struct vmt_softc *);
316 
317 void	 vmt_set_backup_status(struct vmt_softc *, const char *, int,
318 	    const char *);
319 void	 vmt_quiesce_task(void *);
320 void	 vmt_quiesce_done_task(void *);
321 void	 vmt_tclo_abortbackup(struct vmt_softc *);
322 void	 vmt_tclo_startbackup(struct vmt_softc *);
323 void	 vmt_tclo_backupdone(struct vmt_softc *);
324 
325 size_t	 vmt_xdr_ifaddr(struct ifaddr *, char *);
326 size_t	 vmt_xdr_nic_entry(struct ifnet *, char *);
327 size_t	 vmt_xdr_nic_info(char *);
328 void	 vmt_nicinfo_task(void *);
329 
330 int	 vmt_probe(void);
331 
332 struct vmt_tclo_rpc {
333 	const char	*name;
334 	void		(*cb)(struct vmt_softc *);
335 } vmt_tclo_rpc[] = {
336 	/* Keep sorted by name (case-sensitive) */
337 	{ "Capabilities_Register",	vmt_tclo_capreg },
338 	{ "OS_Halt",			vmt_tclo_halt },
339 	{ "OS_PowerOn",			vmt_tclo_poweron },
340 	{ "OS_Reboot",			vmt_tclo_reboot },
341 	{ "OS_Resume",			vmt_tclo_resume },
342 	{ "OS_Suspend",			vmt_tclo_suspend },
343 	{ "Set_Option broadcastIP 1",	vmt_tclo_broadcastip },
344 	{ "ping",			vmt_tclo_ping },
345 	{ "reset",			vmt_tclo_reset },
346 	{ "vmbackup.abort",		vmt_tclo_abortbackup },
347 	{ "vmbackup.snapshotDone",	vmt_tclo_backupdone },
348 	{ "vmbackup.start 1",		vmt_tclo_startbackup },
349 	{ NULL },
350 #if 0
351 	/* Various unsupported commands */
352 	{ "Set_Option autohide 0" },
353 	{ "Set_Option copypaste 1" },
354 	{ "Set_Option enableDnD 1" },
355 	{ "Set_Option enableMessageBusTunnel 0" },
356 	{ "Set_Option linkRootHgfsShare 0" },
357 	{ "Set_Option mapRootHgfsShare 0" },
358 	{ "Set_Option synctime 1" },
359 	{ "Set_Option synctime.period 0" },
360 	{ "Set_Option time.synchronize.tools.enable 1" },
361 	{ "Set_Option time.synchronize.tools.percentCorrection 0" },
362 	{ "Set_Option time.synchronize.tools.slewCorrection 1" },
363 	{ "Set_Option time.synchronize.tools.startup 1" },
364 	{ "Set_Option toolScripts.afterPowerOn 1" },
365 	{ "Set_Option toolScripts.afterResume 1" },
366 	{ "Set_Option toolScripts.beforePowerOff 1" },
367 	{ "Set_Option toolScripts.beforeSuspend 1" },
368 	{ "Time_Synchronize 0" },
369 	{ "Vix_1_Relayed_Command \"38cdcae40e075d66\"" },
370 #endif
371 };
372 
373 const struct cfattach vmt_ca = {
374 	sizeof(struct vmt_softc),
375 	vmt_match,
376 	vmt_attach,
377 	NULL,
378 	vmt_activate
379 };
380 
381 struct cfdriver vmt_cd = {
382 	NULL,
383 	"vmt",
384 	DV_DULL
385 };
386 
387 extern char hostname[MAXHOSTNAMELEN];
388 
389 void
390 vmt_probe_cmd(struct vm_backdoor *frame, uint16_t cmd)
391 {
392 	bzero(frame, sizeof(*frame));
393 
394 	(frame->eax).word = VM_MAGIC;
395 	(frame->ebx).word = ~VM_MAGIC;
396 	(frame->ecx).part.low = cmd;
397 	(frame->ecx).part.high = 0xffff;
398 	(frame->edx).part.low  = VM_PORT_CMD;
399 	(frame->edx).part.high = 0;
400 
401 	vm_cmd(frame);
402 }
403 
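/*
 * Probe for the backdoor: GET_VERSION must answer with VM_MAGIC echoed
 * in EBX and a version other than 0xffffffff in EAX, and GET_SPEED must
 * not leave VM_MAGIC in EAX.
 */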
404 int
405 vmt_probe(void)
406 {
407 	struct vm_backdoor frame;
408 
409 	vmt_probe_cmd(&frame, VM_CMD_GET_VERSION);
410 	if (frame.eax.word == 0xffffffff ||
411 	    frame.ebx.word != VM_MAGIC)
412 		return (0);
413 
414 	vmt_probe_cmd(&frame, VM_CMD_GET_SPEED);
415 	if (frame.eax.word == VM_MAGIC)
416 		return (0);
417 
418 	return (1);
419 }
420 
421 int
422 vmt_match(struct device *parent, void *match, void *aux)
423 {
424 	struct pv_attach_args	*pva = aux;
425 	struct pvbus_hv		*hv = &pva->pva_hv[PVBUS_VMWARE];
426 
427 	if (hv->hv_base == 0)
428 		return (0);
429 	if (!vmt_probe())
430 		return (0);
431 
432 	return (1);
433 }
434 
435 void
436 vmt_attach(struct device *parent, struct device *self, void *aux)
437 {
438 	struct vmt_softc *sc = (struct vmt_softc *)self;
439 	struct pv_attach_args	*pva = aux;
440 	struct pvbus_hv		*hv = &pva->pva_hv[PVBUS_VMWARE];
441 
442 	printf("\n");
443 	sc->sc_rpc_buf = malloc(VMT_RPC_BUFLEN, M_DEVBUF, M_NOWAIT);
444 	if (sc->sc_rpc_buf == NULL) {
445 		printf("%s: unable to allocate buffer for RPC\n",
446 		    DEVNAME(sc));
447 		return;
448 	}
449 
450 	if (vm_rpc_open(&sc->sc_tclo_rpc, VM_RPC_OPEN_TCLO) != 0) {
451 		printf("%s: failed to open backdoor RPC channel "
452 		    "(TCLO protocol)\n", DEVNAME(sc));
453 		goto free;
454 	}
455 
456 	/* don't know if this is important at all yet */
457 	if (vm_rpc_send_rpci_tx(sc,
458 	    "tools.capability.hgfs_server toolbox 1") != 0) {
459 		printf("%s: failed to set HGFS server capability\n", DEVNAME(sc));
460 		goto free;
461 	}
462 
463 	strlcpy(sc->sc_sensordev.xname, sc->sc_dev.dv_xname,
464 	    sizeof(sc->sc_sensordev.xname));
465 
466 	sc->sc_sensor.type = SENSOR_TIMEDELTA;
467 	sc->sc_sensor.status = SENSOR_S_UNKNOWN;
468 
469 	sensor_attach(&sc->sc_sensordev, &sc->sc_sensor);
470 	sensordev_install(&sc->sc_sensordev);
471 
472 	config_mountroot(self, vmt_tick_hook);
473 
474 	timeout_set(&sc->sc_tclo_tick, vmt_tclo_tick, sc);
475 	timeout_add_sec(&sc->sc_tclo_tick, 1);
476 	sc->sc_tclo_ping = 1;
477 
478 	task_set(&sc->sc_nicinfo_task, vmt_nicinfo_task, sc);
479 
480 	/* pvbus(4) key/value interface */
481 	hv->hv_kvop = vmt_kvop;
482 	hv->hv_arg = sc;
483 
484 	return;
485 
486 free:
487 	free(sc->sc_rpc_buf, M_DEVBUF, VMT_RPC_BUFLEN);
488 }
489 
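/*
 * pvbus(4) key/value backend: format an "info-get"/"info-set" command,
 * send it over a temporary RPCI channel, then check for the "1 " status
 * prefix in the reply and strip it before handing the value back.
 */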
490 int
491 vmt_kvop(void *arg, int op, char *key, char *value, size_t valuelen)
492 {
493 	struct vmt_softc *sc = arg;
494 	struct vm_rpc rpci;
495 	char *buf = NULL;
496 	size_t bufsz;
497 	int error = 0;
498 	uint32_t rlen;
499 	uint16_t ack;
500 
501 	bufsz = VMT_RPC_BUFLEN;
502 	buf = malloc(bufsz, M_TEMP, M_WAITOK | M_ZERO);
503 
504 	switch (op) {
505 	case PVBUS_KVWRITE:
506 		if ((size_t)snprintf(buf, bufsz, "info-set %s %s",
507 		    key, value) >= bufsz) {
508 			DPRINTF("%s: write command too long", DEVNAME(sc));
509 			error = EINVAL;
510 			goto done;
511 		}
512 		break;
513 	case PVBUS_KVREAD:
514 		if ((size_t)snprintf(buf, bufsz, "info-get %s",
515 		    key) >= bufsz) {
516 			DPRINTF("%s: read command too long", DEVNAME(sc));
517 			error = EINVAL;
518 			goto done;
519 		}
520 		break;
521 	default:
522 		error = EOPNOTSUPP;
523 		goto done;
524 	}
525 
526 	if (vm_rpc_open(&rpci, VM_RPC_OPEN_RPCI) != 0) {
527 		DPRINTF("%s: rpci channel open failed\n", DEVNAME(sc));
528 		sc->sc_rpc_error = 1;
529 		error = EIO;
530 		goto done;
531 	}
532 
533 	if (vm_rpc_send(&rpci, buf, bufsz) != 0) {
534 		DPRINTF("%s: unable to send rpci command\n", DEVNAME(sc));
535 		sc->sc_rpc_error = 1;
536 		error = EIO;
537 		goto close;
538 	}
539 
540 	if (vm_rpc_get_length(&rpci, &rlen, &ack) != 0) {
541 		DPRINTF("%s: failed to get length of rpci response data\n",
542 		    DEVNAME(sc));
543 		sc->sc_rpc_error = 1;
544 		error = EIO;
545 		goto close;
546 	}
547 
548 	if (rlen > 0) {
549 		if (rlen + 1 > valuelen) {
550 			error = ERANGE;
551 			goto close;
552 		}
553 
554 		if (vm_rpc_get_data(&rpci, value, rlen, ack) != 0) {
555 			DPRINTF("%s: failed to get rpci response data\n",
556 			    DEVNAME(sc));
557 			sc->sc_rpc_error = 1;
558 			error = EIO;
559 			goto close;
560 		}
561 		/* check whether the response indicates success */
562 		if (rlen < 2 || value[0] != '1' || value[1] != ' ') {
563 			DPRINTF("%s: host rejected command: %s\n", DEVNAME(sc),
564 			    buf);
565 			error = EINVAL;
566 			goto close;
567 		}
568 		/* strip the status prefix that was just checked */
569 		bcopy(value + 2, value, valuelen - 2);
570 		value[rlen - 2] = '\0';
571 	}
572 
573  close:
574 	if (vm_rpc_close(&rpci) != 0)
575 		DPRINTF("%s: unable to close rpci channel\n", DEVNAME(sc));
576  done:
577 	free(buf, M_TEMP, bufsz);
578 	return (error);
579 }
580 
581 void
582 vmt_resume(void)
583 {
584 	struct vm_backdoor frame;
585 	extern void rdrand(void *);
586 
587 	bzero(&frame, sizeof(frame));
588 	frame.eax.word = VM_MAGIC;
589 	frame.ecx.part.low = VM_CMD_GET_TIME_FULL;
590 	frame.edx.part.low  = VM_PORT_CMD;
591 	vm_cmd(&frame);
592 
593 	rdrand(NULL);
594 	enqueue_randomness(frame.eax.word);
595 	enqueue_randomness(frame.esi.word);
596 	enqueue_randomness(frame.edx.word);
597 	enqueue_randomness(frame.ebx.word);
598 	resume_randomness(NULL, 0);
599 }
600 
601 int
602 vmt_activate(struct device *self, int act)
603 {
604 	int rv = 0;
605 
606 	switch (act) {
607 	case DVACT_POWERDOWN:
608 		vmt_shutdown(self);
609 		break;
610 	case DVACT_RESUME:
611 		vmt_resume();
612 		break;
613 	}
614 	return (rv);
615 }
616 
617 
618 void
619 vmt_update_guest_uptime(struct vmt_softc *sc)
620 {
621 	/* host wants uptime in hundredths of a second */
622 	if (vm_rpc_send_rpci_tx(sc, "SetGuestInfo  %d %lld00",
623 	    VM_GUEST_INFO_UPTIME, (long long)getuptime()) != 0) {
624 		DPRINTF("%s: unable to set guest uptime", DEVNAME(sc));
625 		sc->sc_rpc_error = 1;
626 	}
627 }
628 
629 void
630 vmt_clear_guest_info(struct vmt_softc *sc)
631 {
632 	if (sc->sc_nic_info_size != 0) {
633 		free(sc->sc_nic_info, M_DEVBUF, sc->sc_nic_info_size);
634 		sc->sc_nic_info = NULL;
635 		sc->sc_nic_info_size = 0;
636 	}
637 	sc->sc_hostname[0] = '\0';
638 	sc->sc_set_guest_os = 0;
639 }
640 
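/*
 * Push guest identification to the host: the hostname (when it has
 * changed) and the OS name/version (once), then schedule a NIC info
 * update on the system task queue.
 */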
641 void
642 vmt_update_guest_info(struct vmt_softc *sc)
643 {
644 	if (strncmp(sc->sc_hostname, hostname, sizeof(sc->sc_hostname)) != 0) {
645 		strlcpy(sc->sc_hostname, hostname, sizeof(sc->sc_hostname));
646 
647 		if (vm_rpc_send_rpci_tx(sc, "SetGuestInfo  %d %s",
648 		    VM_GUEST_INFO_DNS_NAME, sc->sc_hostname) != 0) {
649 			DPRINTF("%s: unable to set hostname", DEVNAME(sc));
650 			sc->sc_rpc_error = 1;
651 		}
652 	}
653 
654 	if (sc->sc_set_guest_os == 0) {
655 		if (vm_rpc_send_rpci_tx(sc, "SetGuestInfo  %d %s %s %s",
656 		    VM_GUEST_INFO_OS_NAME_FULL,
657 		    ostype, osrelease, osversion) != 0) {
658 			DPRINTF("%s: unable to set full guest OS", DEVNAME(sc));
659 			sc->sc_rpc_error = 1;
660 		}
661 
662 		/*
663 		 * Host doesn't like it if we send an OS name it doesn't
664 		 * recognise, so use the closest match, which happens
665 		 * to be FreeBSD.
666 		 */
667 
668 		if (vm_rpc_send_rpci_tx(sc, "SetGuestInfo  %d %s",
669 		    VM_GUEST_INFO_OS_NAME, "FreeBSD") != 0) {
670 			DPRINTF("%s: unable to set guest OS", DEVNAME(sc));
671 			sc->sc_rpc_error = 1;
672 		}
673 
674 		sc->sc_set_guest_os = 1;
675 	}
676 
677 	task_add(systq, &sc->sc_nicinfo_task);
678 }
679 
680 void
681 vmt_tick_hook(struct device *self)
682 {
683 	struct vmt_softc *sc = (struct vmt_softc *)self;
684 
685 	timeout_set(&sc->sc_tick, vmt_tick, sc);
686 	vmt_tick(sc);
687 }
688 
689 void
690 vmt_tick(void *xarg)
691 {
692 	struct vmt_softc *sc = xarg;
693 	struct vm_backdoor frame;
694 	struct timeval *guest = &sc->sc_sensor.tv;
695 	struct timeval host, diff;
696 
697 	microtime(guest);
698 
699 	bzero(&frame, sizeof(frame));
700 	frame.eax.word = VM_MAGIC;
701 	frame.ecx.part.low = VM_CMD_GET_TIME_FULL;
702 	frame.edx.part.low  = VM_PORT_CMD;
703 	vm_cmd(&frame);
704 
705 	if (frame.eax.word != 0xffffffff) {
706 		host.tv_sec = ((uint64_t)frame.esi.word << 32) | frame.edx.word;
707 		host.tv_usec = frame.ebx.word;
708 
709 		timersub(guest, &host, &diff);
710 
711 		sc->sc_sensor.value = (u_int64_t)diff.tv_sec * 1000000000LL +
712 		    (u_int64_t)diff.tv_usec * 1000LL;
713 		sc->sc_sensor.status = SENSOR_S_OK;
714 	} else {
715 		sc->sc_sensor.status = SENSOR_S_UNKNOWN;
716 	}
717 
718 	vmt_update_guest_info(sc);
719 	vmt_update_guest_uptime(sc);
720 
721 	timeout_add_sec(&sc->sc_tick, 15);
722 }
723 
724 void
725 vmt_tclo_state_change_success(struct vmt_softc *sc, int success, char state)
726 {
727 	if (vm_rpc_send_rpci_tx(sc, "tools.os.statechange.status %d %d",
728 	    success, state) != 0) {
729 		DPRINTF("%s: unable to send state change result\n",
730 		    DEVNAME(sc));
731 		sc->sc_rpc_error = 1;
732 	}
733 }
734 
735 void
736 vmt_do_shutdown(struct vmt_softc *sc)
737 {
738 	vmt_tclo_state_change_success(sc, 1, VM_STATE_CHANGE_HALT);
739 	vm_rpc_send_str(&sc->sc_tclo_rpc, VM_RPC_REPLY_OK);
740 	pvbus_shutdown(&sc->sc_dev);
741 }
742 
743 void
744 vmt_do_reboot(struct vmt_softc *sc)
745 {
746 	vmt_tclo_state_change_success(sc, 1, VM_STATE_CHANGE_REBOOT);
747 	vm_rpc_send_str(&sc->sc_tclo_rpc, VM_RPC_REPLY_OK);
748 	pvbus_reboot(&sc->sc_dev);
749 }
750 
751 void
752 vmt_shutdown(void *arg)
753 {
754 	struct vmt_softc *sc = arg;
755 
756 	if (vm_rpc_send_rpci_tx(sc,
757 	    "tools.capability.hgfs_server toolbox 0") != 0) {
758 		DPRINTF("%s: failed to disable hgfs server capability\n",
759 		    DEVNAME(sc));
760 	}
761 
762 	if (vm_rpc_send(&sc->sc_tclo_rpc, NULL, 0) != 0) {
763 		DPRINTF("%s: failed to send shutdown ping\n", DEVNAME(sc));
764 	}
765 
766 	vm_rpc_close(&sc->sc_tclo_rpc);
767 }
768 
769 void
770 vmt_tclo_reset(struct vmt_softc *sc)
771 {
772 	if (sc->sc_rpc_error != 0) {
773 		DPRINTF("%s: resetting rpc\n", DEVNAME(sc));
774 		vm_rpc_close(&sc->sc_tclo_rpc);
775 
776 		/* reopen and send the reset reply next time around */
777 		sc->sc_rpc_error = 1;
778 		return;
779 	}
780 
781 	if (vm_rpc_send_str(&sc->sc_tclo_rpc, VM_RPC_RESET_REPLY) != 0) {
782 		DPRINTF("%s: failed to send reset reply\n", DEVNAME(sc));
783 		sc->sc_rpc_error = 1;
784 	}
785 }
786 
787 void
788 vmt_tclo_ping(struct vmt_softc *sc)
789 {
790 	if (vm_rpc_send_str(&sc->sc_tclo_rpc, VM_RPC_REPLY_OK) != 0) {
791 		DPRINTF("%s: error sending ping response\n", DEVNAME(sc));
792 		sc->sc_rpc_error = 1;
793 	}
794 }
795 
796 void
797 vmt_tclo_halt(struct vmt_softc *sc)
798 {
799 	vmt_do_shutdown(sc);
800 }
801 
802 void
803 vmt_tclo_reboot(struct vmt_softc *sc)
804 {
805 	vmt_do_reboot(sc);
806 }
807 
808 void
809 vmt_tclo_poweron(struct vmt_softc *sc)
810 {
811 	vmt_tclo_state_change_success(sc, 1, VM_STATE_CHANGE_POWERON);
812 
813 	if (vm_rpc_send_str(&sc->sc_tclo_rpc, VM_RPC_REPLY_OK) != 0) {
814 		DPRINTF("%s: error sending poweron response\n", DEVNAME(sc));
815 		sc->sc_rpc_error = 1;
816 	}
817 }
818 
819 void
820 vmt_tclo_suspend(struct vmt_softc *sc)
821 {
822 	log(LOG_KERN | LOG_NOTICE,
823 	    "VMware guest entering suspended state\n");
824 
825 	suspend_randomness();
826 
827 	vmt_tclo_state_change_success(sc, 1, VM_STATE_CHANGE_SUSPEND);
828 	if (vm_rpc_send_str(&sc->sc_tclo_rpc, VM_RPC_REPLY_OK) != 0) {
829 		DPRINTF("%s: error sending suspend response\n", DEVNAME(sc));
830 		sc->sc_rpc_error = 1;
831 	}
832 }
833 
834 void
835 vmt_tclo_resume(struct vmt_softc *sc)
836 {
837 	log(LOG_KERN | LOG_NOTICE,
838 	    "VMware guest resuming from suspended state\n");
839 
840 	/* force guest info update */
841 	vmt_clear_guest_info(sc);
842 	vmt_update_guest_info(sc);
843 	vmt_resume();
844 
845 	vmt_tclo_state_change_success(sc, 1, VM_STATE_CHANGE_RESUME);
846 	if (vm_rpc_send_str(&sc->sc_tclo_rpc, VM_RPC_REPLY_OK) != 0) {
847 		DPRINTF("%s: error sending resume response\n", DEVNAME(sc));
848 		sc->sc_rpc_error = 1;
849 	}
850 }
851 
852 void
853 vmt_tclo_capreg(struct vmt_softc *sc)
854 {
855 	/* don't know if this is important at all */
856 	if (vm_rpc_send_rpci_tx(sc,
857 	    "vmx.capability.unified_loop toolbox") != 0) {
858 		DPRINTF("%s: unable to set unified loop\n", DEVNAME(sc));
859 		sc->sc_rpc_error = 1;
860 	}
861 
862 	if (vm_rpci_response_successful(sc) == 0) {
863 		DPRINTF("%s: host rejected unified loop setting\n",
864 		    DEVNAME(sc));
865 	}
866 
867 	/* the trailing space is apparently important here */
868 	if (vm_rpc_send_rpci_tx(sc,
869 	    "tools.capability.statechange ") != 0) {
870 		DPRINTF("%s: unable to send statechange capability\n",
871 		    DEVNAME(sc));
872 		sc->sc_rpc_error = 1;
873 	}
874 
875 	if (vm_rpci_response_successful(sc) == 0) {
876 		DPRINTF("%s: host rejected statechange capability\n",
877 		    DEVNAME(sc));
878 	}
879 
880 	if (vm_rpc_send_rpci_tx(sc, "tools.set.version %u",
881 	    VM_VERSION_UNMANAGED) != 0) {
882 		DPRINTF("%s: unable to set tools version\n",
883 		    DEVNAME(sc));
884 		sc->sc_rpc_error = 1;
885 	}
886 
887 	vmt_clear_guest_info(sc);
888 	vmt_update_guest_uptime(sc);
889 
890 	if (vm_rpc_send_str(&sc->sc_tclo_rpc, VM_RPC_REPLY_OK) != 0) {
891 		DPRINTF("%s: error sending capabilities_register"
892 		    " response\n", DEVNAME(sc));
893 		sc->sc_rpc_error = 1;
894 	}
895 }
896 
897 void
898 vmt_tclo_broadcastip(struct vmt_softc *sc)
899 {
900 	struct ifnet *iface;
901 	struct sockaddr_in *guest_ip;
902 
903 	/* find a non-loopback IPv4 address to report */
904 	guest_ip = NULL;
905 	TAILQ_FOREACH(iface, &ifnetlist, if_list) {
906 		struct ifaddr *iface_addr;
907 
908 		/* skip loopback */
909 		if (strncmp(iface->if_xname, "lo", 2) == 0 &&
910 		    iface->if_xname[2] >= '0' &&
911 		    iface->if_xname[2] <= '9') {
912 			continue;
913 		}
914 
915 		TAILQ_FOREACH(iface_addr, &iface->if_addrlist,
916 		    ifa_list) {
917 			if (iface_addr->ifa_addr->sa_family != AF_INET)
918 				continue;
919 
920 			guest_ip = satosin(iface_addr->ifa_addr);
921 			break;
922 		}
923 	}
924 
925 	if (guest_ip != NULL) {
926 		char ip[INET_ADDRSTRLEN];
927 
928 		inet_ntop(AF_INET, &guest_ip->sin_addr, ip, sizeof(ip));
929 		if (vm_rpc_send_rpci_tx(sc, "info-set guestinfo.ip %s",
930 		    ip) != 0) {
931 			DPRINTF("%s: unable to send guest IP address\n",
932 			    DEVNAME(sc));
933 			sc->sc_rpc_error = 1;
934 		}
935 
936 		if (vm_rpc_send_str(&sc->sc_tclo_rpc,
937 		    VM_RPC_REPLY_OK) != 0) {
938 			DPRINTF("%s: error sending broadcastIP"
939 			    " response\n", DEVNAME(sc));
940 			sc->sc_rpc_error = 1;
941 		}
942 	} else {
943 		if (vm_rpc_send_str(&sc->sc_tclo_rpc,
944 		    VM_RPC_REPLY_ERROR_IP_ADDR) != 0) {
945 			DPRINTF("%s: error sending broadcastIP"
946 			    " error response\n", DEVNAME(sc));
947 			sc->sc_rpc_error = 1;
948 		}
949 	}
950 }
951 
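/*
 * Backup/quiesce support: "vmbackup.start" queues vmt_quiesce_task,
 * which stalls the VFS layer and reports prov.snapshotCommit;
 * "vmbackup.snapshotDone" or "vmbackup.abort" queues
 * vmt_quiesce_done_task, which unstalls and reports req.done.
 * Progress is reported to the host via vmbackup.eventSet RPCs.
 */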
952 void
953 vmt_set_backup_status(struct vmt_softc *sc, const char *state, int code,
954     const char *desc)
955 {
956 	if (vm_rpc_send_rpci_tx(sc, "vmbackup.eventSet %s %d %s",
957 	    state, code, desc) != 0) {
958 		DPRINTF("%s: setting backup status failed\n", DEVNAME(sc));
959 	}
960 }
961 
962 void
963 vmt_quiesce_task(void *data)
964 {
965 	struct vmt_softc *sc = data;
966 	int err;
967 
968 	DPRINTF("%s: quiescing filesystems for backup\n", DEVNAME(sc));
969 	err = vfs_stall(curproc, 1);
970 	if (err != 0) {
971 		printf("%s: unable to quiesce filesystems\n", DEVNAME(sc));
972 		vfs_stall(curproc, 0);
973 
974 		vmt_set_backup_status(sc, "req.aborted", VM_BACKUP_SYNC_ERROR,
975 		    "vfs_stall failed");
976 		vmt_set_backup_status(sc, "req.done", VM_BACKUP_SUCCESS, "");
977 		sc->sc_quiesce = 0;
978 		return;
979 	}
980 
981 	DPRINTF("%s: filesystems quiesced\n", DEVNAME(sc));
982 	vmt_set_backup_status(sc, "prov.snapshotCommit", VM_BACKUP_SUCCESS, "");
983 }
984 
985 void
986 vmt_quiesce_done_task(void *data)
987 {
988 	struct vmt_softc *sc = data;
989 
990 	vfs_stall(curproc, 0);
991 
992 	if (sc->sc_quiesce == -1)
993 		vmt_set_backup_status(sc, "req.aborted", VM_BACKUP_REMOTE_ABORT,
994 		    "");
995 
996 	vmt_set_backup_status(sc, "req.done", VM_BACKUP_SUCCESS, "");
997 	sc->sc_quiesce = 0;
998 }
999 
1000 void
1001 vmt_tclo_abortbackup(struct vmt_softc *sc)
1002 {
1003 	const char *reply = VM_RPC_REPLY_OK;
1004 
1005 	if (sc->sc_quiesce > 0) {
1006 		DPRINTF("%s: aborting backup\n", DEVNAME(sc));
1007 		sc->sc_quiesce = -1;
1008 		task_set(&sc->sc_quiesce_task, vmt_quiesce_done_task, sc);
1009 		task_add(systq, &sc->sc_quiesce_task);
1010 	} else {
1011 		DPRINTF("%s: can't abort, no backup in progress\n",
1012 		    DEVNAME(sc));
1013 		reply = VM_RPC_REPLY_ERROR;
1014 	}
1015 
1016 	if (vm_rpc_send_str(&sc->sc_tclo_rpc, reply) != 0) {
1017 		DPRINTF("%s: error sending vmbackup.abort reply\n",
1018 		    DEVNAME(sc));
1019 		sc->sc_rpc_error = 1;
1020 	}
1021 }
1022 
1023 void
1024 vmt_tclo_startbackup(struct vmt_softc *sc)
1025 {
1026 	const char *reply = VM_RPC_REPLY_OK;
1027 
1028 	if (sc->sc_quiesce == 0) {
1029 		DPRINTF("%s: starting quiesce\n", DEVNAME(sc));
1030 		vmt_set_backup_status(sc, "reset", VM_BACKUP_SUCCESS, "");
1031 
1032 		task_set(&sc->sc_quiesce_task, vmt_quiesce_task, sc);
1033 		task_add(systq, &sc->sc_quiesce_task);
1034 		sc->sc_quiesce = 1;
1035 	} else {
1036 		DPRINTF("%s: can't start backup, already in progress\n",
1037 		    DEVNAME(sc));
1038 		reply = VM_RPC_REPLY_ERROR;
1039 	}
1040 
1041 	if (vm_rpc_send_str(&sc->sc_tclo_rpc, reply) != 0) {
1042 		DPRINTF("%s: error sending vmbackup.start reply\n",
1043 		    DEVNAME(sc));
1044 		sc->sc_rpc_error = 1;
1045 	}
1046 }
1047 
1048 void
1049 vmt_tclo_backupdone(struct vmt_softc *sc)
1050 {
1051 	const char *reply = VM_RPC_REPLY_OK;
1052 	if (sc->sc_quiesce > 0) {
1053 		DPRINTF("%s: backup complete\n", DEVNAME(sc));
1054 		task_set(&sc->sc_quiesce_task, vmt_quiesce_done_task, sc);
1055 		task_add(systq, &sc->sc_quiesce_task);
1056 	} else {
1057 		DPRINTF("%s: got backup complete, but not doing a backup\n",
1058 		    DEVNAME(sc));
1059 		reply = VM_RPC_REPLY_ERROR;
1060 	}
1061 
1062 	if (vm_rpc_send_str(&sc->sc_tclo_rpc, reply) != 0) {
1063 		DPRINTF("%s: error sending vmbackup.snapshotDone reply\n",
1064 		    DEVNAME(sc));
1065 		sc->sc_rpc_error = 1;
1066 	}
1067 }
1068 
1069 int
1070 vmt_tclo_process(struct vmt_softc *sc, const char *name)
1071 {
1072 	int i;
1073 
1074 	/* Search for rpc command and call handler */
1075 	for (i = 0; vmt_tclo_rpc[i].name != NULL; i++) {
1076 		if (strcmp(vmt_tclo_rpc[i].name, sc->sc_rpc_buf) == 0) {
1077 			vmt_tclo_rpc[i].cb(sc);
1078 			return (0);
1079 		}
1080 	}
1081 
1082 	DPRINTF("%s: unknown command: \"%s\"\n", DEVNAME(sc), name);
1083 
1084 	return (-1);
1085 }
1086 
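/*
 * TCLO poll loop: watch the quiesce timeout, reopen the TCLO channel if
 * it was closed after an error, ping the host, then receive and
 * dispatch any pending host-to-guest message.  Re-arms itself
 * immediately while messages are pending, every second when idle or
 * after an error, and every 15 seconds if the channel cannot be
 * reopened.
 */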
1087 void
1088 vmt_tclo_tick(void *xarg)
1089 {
1090 	struct vmt_softc *sc = xarg;
1091 	u_int32_t rlen;
1092 	u_int16_t ack;
1093 	int delay;
1094 
1095 	/* By default, poll every second for new messages */
1096 	delay = 1;
1097 
1098 	if (sc->sc_quiesce > 0) {
1099 		/* abort quiesce if it's taking too long */
1100 		if (sc->sc_quiesce++ == VM_BACKUP_TIMEOUT) {
1101 			printf("%s: aborting quiesce\n", DEVNAME(sc));
1102 			sc->sc_quiesce = -1;
1103 			task_set(&sc->sc_quiesce_task, vmt_quiesce_done_task,
1104 			    sc);
1105 			task_add(systq, &sc->sc_quiesce_task);
1106 		} else
1107 			vmt_set_backup_status(sc, "req.keepAlive",
1108 			    VM_BACKUP_SUCCESS, "");
1109 	}
1110 
1111 	/* reopen tclo channel if it's currently closed */
1112 	if (sc->sc_tclo_rpc.channel == 0 &&
1113 	    sc->sc_tclo_rpc.cookie1 == 0 &&
1114 	    sc->sc_tclo_rpc.cookie2 == 0) {
1115 		if (vm_rpc_open(&sc->sc_tclo_rpc, VM_RPC_OPEN_TCLO) != 0) {
1116 			DPRINTF("%s: unable to reopen TCLO channel\n",
1117 			    DEVNAME(sc));
1118 			delay = 15;
1119 			goto out;
1120 		}
1121 
1122 		if (vm_rpc_send_str(&sc->sc_tclo_rpc,
1123 		    VM_RPC_RESET_REPLY) != 0) {
1124 			DPRINTF("%s: failed to send reset reply\n",
1125 			    DEVNAME(sc));
1126 			sc->sc_rpc_error = 1;
1127 			goto out;
1128 		} else {
1129 			sc->sc_rpc_error = 0;
1130 		}
1131 	}
1132 
1133 	if (sc->sc_tclo_ping) {
1134 		if (vm_rpc_send(&sc->sc_tclo_rpc, NULL, 0) != 0) {
1135 			DPRINTF("%s: failed to send TCLO outgoing ping\n",
1136 			    DEVNAME(sc));
1137 			sc->sc_rpc_error = 1;
1138 			goto out;
1139 		}
1140 	}
1141 
1142 	if (vm_rpc_get_length(&sc->sc_tclo_rpc, &rlen, &ack) != 0) {
1143 		DPRINTF("%s: failed to get length of incoming TCLO data\n",
1144 		    DEVNAME(sc));
1145 		sc->sc_rpc_error = 1;
1146 		goto out;
1147 	}
1148 
1149 	if (rlen == 0) {
1150 		sc->sc_tclo_ping = 1;
1151 		goto out;
1152 	}
1153 
1154 	if (rlen >= VMT_RPC_BUFLEN) {
1155 		rlen = VMT_RPC_BUFLEN - 1;
1156 	}
1157 	if (vm_rpc_get_data(&sc->sc_tclo_rpc, sc->sc_rpc_buf, rlen, ack) != 0) {
1158 		DPRINTF("%s: failed to get incoming TCLO data\n", DEVNAME(sc));
1159 		sc->sc_rpc_error = 1;
1160 		goto out;
1161 	}
1162 	sc->sc_tclo_ping = 0;
1163 
1164 	/* The VM host can queue multiple messages; continue without delay */
1165 	delay = 0;
1166 
1167 	if (vmt_tclo_process(sc, sc->sc_rpc_buf) != 0) {
1168 		if (vm_rpc_send_str(&sc->sc_tclo_rpc,
1169 		    VM_RPC_REPLY_ERROR) != 0) {
1170 			DPRINTF("%s: error sending unknown command reply\n",
1171 			    DEVNAME(sc));
1172 			sc->sc_rpc_error = 1;
1173 		}
1174 	}
1175 
1176 	if (sc->sc_rpc_error == 1) {
1177 		/* On error, give time to recover and wait a second */
1178 		delay = 1;
1179 	}
1180 
1181 out:
1182 	timeout_add_sec(&sc->sc_tclo_tick, delay);
1183 }
1184 
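/*
 * Encode one interface address as a big-endian NicInfo address record,
 * or just return the record size when data is NULL (the encoders below
 * are all run twice: once to size the buffer, once to fill it).
 * Loopback and scope-embedded addresses are skipped.
 */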
1185 size_t
1186 vmt_xdr_ifaddr(struct ifaddr *ifa, char *data)
1187 {
1188 	struct sockaddr_in *sin;
1189 	struct vm_nicinfo_addr_v4 v4;
1190 #ifdef INET6
1191 	struct sockaddr_in6 *sin6;
1192 	struct vm_nicinfo_addr_v6 v6;
1193 #endif
1194 
1195 	/* skip loopback addresses and anything that isn't ipv4/v6 */
1196 	switch (ifa->ifa_addr->sa_family) {
1197 	case AF_INET:
1198 		sin = satosin(ifa->ifa_addr);
1199 		if ((ntohl(sin->sin_addr.s_addr) >>
1200 		    IN_CLASSA_NSHIFT) != IN_LOOPBACKNET) {
1201 			if (data != NULL) {
1202 				memset(&v4, 0, sizeof(v4));
1203 				htobem32(&v4.v4_addr_type,
1204 				    VM_NICINFO_ADDR_IPV4);
1205 				htobem32(&v4.v4_addr_len,
1206 				    sizeof(struct in_addr));
1207 				memcpy(&v4.v4_addr, &sin->sin_addr.s_addr,
1208 				    sizeof(struct in_addr));
1209 				htobem32(&v4.v4_prefix_len,
1210 				    rtable_satoplen(AF_INET, ifa->ifa_netmask));
1211 				memcpy(data, &v4, sizeof(v4));
1212 			}
1213 			return (sizeof (v4));
1214 		}
1215 		break;
1216 
1217 #ifdef INET6
1218 	case AF_INET6:
1219 		sin6 = satosin6(ifa->ifa_addr);
1220 		if (!IN6_IS_ADDR_LOOPBACK(&sin6->sin6_addr) &&
1221 		    !IN6_IS_SCOPE_EMBED(&sin6->sin6_addr)) {
1222 			if (data != NULL) {
1223 				memset(&v6, 0, sizeof(v6));
1224 				htobem32(&v6.v6_addr_type,
1225 				    VM_NICINFO_ADDR_IPV6);
1226 				htobem32(&v6.v6_addr_len,
1227 				    sizeof(sin6->sin6_addr));
1228 				memcpy(&v6.v6_addr, &sin6->sin6_addr,
1229 				    sizeof(sin6->sin6_addr));
1230 				htobem32(&v6.v6_prefix_len,
1231 				    rtable_satoplen(AF_INET6,
1232 				        ifa->ifa_netmask));
1233 				memcpy(data, &v6, sizeof(v6));
1234 			}
1235 			return (sizeof (v6));
1236 		}
1237 		break;
1238 #endif
1239 
1240 	default:
1241 		break;
1242 	}
1243 
1244 	return (0);
1245 }
1246 
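/*
 * Encode one NIC: a header carrying the MAC address (if the interface
 * has one) and the address count, followed by up to
 * VM_NICINFO_MAX_ADDRS encoded addresses and an empty
 * vm_nicinfo_nic_post.  Returns 0 if the interface has no addresses
 * worth reporting.
 */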
1247 size_t
1248 vmt_xdr_nic_entry(struct ifnet *iface, char *data)
1249 {
1250 	struct ifaddr *iface_addr;
1251 	struct sockaddr_dl *sdl;
1252 	struct vm_nicinfo_nic nic;
1253 	struct vm_nicinfo_nic_nomac nnic;
1254 	char *nicdata;
1255 	const char *mac;
1256 	size_t addrsize, total;
1257 	int addrs;
1258 
1259 	total = 0;
1260 	addrs = 0;
1261 
1262 	/* work out if we have a mac address */
1263 	sdl = iface->if_sadl;
1264 	if (sdl != NULL && sdl->sdl_alen &&
1265 	    (sdl->sdl_type == IFT_ETHER || sdl->sdl_type == IFT_CARP))
1266 		mac = ether_sprintf(sdl->sdl_data + sdl->sdl_nlen);
1267 	else
1268 		mac = NULL;
1269 
1270 	if (data != NULL) {
1271 		nicdata = data;
1272 		if (mac != NULL)
1273 			data += sizeof(nic);
1274 		else
1275 			data += sizeof(nnic);
1276 	}
1277 
1278 	TAILQ_FOREACH(iface_addr, &iface->if_addrlist, ifa_list) {
1279 		addrsize = vmt_xdr_ifaddr(iface_addr, data);
1280 		if (addrsize == 0)
1281 			continue;
1282 
1283 		if (data != NULL)
1284 			data += addrsize;
1285 		total += addrsize;
1286 		addrs++;
1287 		if (addrs == VM_NICINFO_MAX_ADDRS)
1288 			break;
1289 	}
1290 
1291 	if (addrs == 0)
1292 		return (0);
1293 
1294 	if (data != NULL) {
1295 		/* fill in mac address, if any */
1296 		if (mac != NULL) {
1297 			memset(&nic, 0, sizeof(nic));
1298 			htobem32(&nic.ni_mac_len, strlen(mac));
1299 			strncpy(nic.ni_mac, mac, VM_NICINFO_MAC_LEN);
1300 			htobem32(&nic.ni_num_addrs, addrs);
1301 			memcpy(nicdata, &nic, sizeof(nic));
1302 		} else {
1303 			nnic.nn_mac_len = 0;
1304 			htobem32(&nnic.nn_num_addrs, addrs);
1305 			memcpy(nicdata, &nnic, sizeof(nnic));
1306 		}
1307 
1308 		/* we don't actually set anything in vm_nicinfo_nic_post */
1309 	}
1310 
1311 	if (mac != NULL)
1312 		total += sizeof(nic);
1313 	else
1314 		total += sizeof(nnic);
1315 	total += sizeof(struct vm_nicinfo_nic_post);
1316 	return (total);
1317 }
1318 
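/*
 * Encode the complete NicInfo V3 message: a list header, up to
 * VM_NICINFO_MAX_NICS per-NIC entries and an empty
 * vm_nicinfo_nic_list_post.  Caller must hold the net lock.
 */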
1319 size_t
1320 vmt_xdr_nic_info(char *data)
1321 {
1322 	struct ifnet *iface;
1323 	struct vm_nicinfo_nic_list nl;
1324 	size_t total, nictotal;
1325 	char *listdata = NULL;
1326 	int nics;
1327 
1328 	NET_ASSERT_LOCKED();
1329 
1330 	total = sizeof(nl);
1331 	if (data != NULL) {
1332 		listdata = data;
1333 		data += sizeof(nl);
1334 	}
1335 
1336 	nics = 0;
1337 	TAILQ_FOREACH(iface, &ifnetlist, if_list) {
1338 		nictotal = vmt_xdr_nic_entry(iface, data);
1339 		if (nictotal == 0)
1340 			continue;
1341 
1342 		if (data != NULL)
1343 			data += nictotal;
1344 
1345 		total += nictotal;
1346 		nics++;
1347 		if (nics == VM_NICINFO_MAX_NICS)
1348 			break;
1349 	}
1350 
1351 	if (listdata != NULL) {
1352 		memset(&nl, 0, sizeof(nl));
1353 		htobem32(&nl.nl_version, VM_NICINFO_VERSION);
1354 		htobem32(&nl.nl_nic_list, 1);
1355 		htobem32(&nl.nl_num_nics, nics);
1356 		memcpy(listdata, &nl, sizeof(nl));
1357 	}
1358 
1359 	/* we don't actually set anything in vm_nicinfo_nic_list_post */
1360 	total += sizeof(struct vm_nicinfo_nic_list_post);
1361 
1362 	return (total);
1363 }
1364 
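/*
 * Build the NicInfo message, prefixed with the "SetGuestInfo  10 "
 * command, and send it to the host, but only when it differs from the
 * copy sent last time.
 */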
1365 void
1366 vmt_nicinfo_task(void *data)
1367 {
1368 	struct vmt_softc *sc = data;
1369 	size_t nic_info_size;
1370 	char *nic_info;
1371 
1372 	NET_LOCK();
1373 
1374 	nic_info_size = vmt_xdr_nic_info(NULL) + sizeof(VM_NICINFO_CMD) - 1;
1375 	nic_info = malloc(nic_info_size, M_DEVBUF, M_WAITOK | M_ZERO);
1376 
1377 	strncpy(nic_info, VM_NICINFO_CMD, nic_info_size);
1378 	vmt_xdr_nic_info(nic_info + sizeof(VM_NICINFO_CMD) - 1);
1379 
1380 	NET_UNLOCK();
1381 
1382 	if (nic_info_size != sc->sc_nic_info_size ||
1383 	    (memcmp(nic_info, sc->sc_nic_info, nic_info_size) != 0)) {
1384 		if (vm_rpc_send_rpci_tx_buf(sc, nic_info,
1385 		    nic_info_size) != 0) {
1386 			DPRINTF("%s: unable to send nic info",
1387 			    DEVNAME(sc));
1388 			sc->sc_rpc_error = 1;
1389 		}
1390 
1391 		free(sc->sc_nic_info, M_DEVBUF, sc->sc_nic_info_size);
1392 		sc->sc_nic_info = nic_info;
1393 		sc->sc_nic_info_size = nic_info_size;
1394 	} else {
1395 		free(nic_info, M_DEVBUF, nic_info_size);
1396 	}
1397 }
1398 
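/*
 * The BACKDOOR_OP macros load the saved register frame into the CPU
 * registers, execute the given I/O instruction on the backdoor port and
 * copy the resulting register values back into the frame.
 */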
1399 #define BACKDOOR_OP_I386(op, frame)		\
1400 	__asm__ volatile (			\
1401 		"pushal;"			\
1402 		"pushl %%eax;"			\
1403 		"movl 0x18(%%eax), %%ebp;"	\
1404 		"movl 0x14(%%eax), %%edi;"	\
1405 		"movl 0x10(%%eax), %%esi;"	\
1406 		"movl 0x0c(%%eax), %%edx;"	\
1407 		"movl 0x08(%%eax), %%ecx;"	\
1408 		"movl 0x04(%%eax), %%ebx;"	\
1409 		"movl 0x00(%%eax), %%eax;"	\
1410 		op				\
1411 		"xchgl %%eax, 0x00(%%esp);"	\
1412 		"movl %%ebp, 0x18(%%eax);"	\
1413 		"movl %%edi, 0x14(%%eax);"	\
1414 		"movl %%esi, 0x10(%%eax);"	\
1415 		"movl %%edx, 0x0c(%%eax);"	\
1416 		"movl %%ecx, 0x08(%%eax);"	\
1417 		"movl %%ebx, 0x04(%%eax);"	\
1418 		"popl 0x00(%%eax);"		\
1419 		"popal;"			\
1420 		::"a"(frame)			\
1421 	)
1422 
1423 #define BACKDOOR_OP_AMD64(op, frame)		\
1424 	__asm__ volatile (			\
1425 		"pushq %%rbp;			\n\t" \
1426 		"pushq %%rax;			\n\t" \
1427 		"movq 0x30(%%rax), %%rbp;	\n\t" \
1428 		"movq 0x28(%%rax), %%rdi;	\n\t" \
1429 		"movq 0x20(%%rax), %%rsi;	\n\t" \
1430 		"movq 0x18(%%rax), %%rdx;	\n\t" \
1431 		"movq 0x10(%%rax), %%rcx;	\n\t" \
1432 		"movq 0x08(%%rax), %%rbx;	\n\t" \
1433 		"movq 0x00(%%rax), %%rax;	\n\t" \
1434 		op				"\n\t" \
1435 		"xchgq %%rax, 0x00(%%rsp);	\n\t" \
1436 		"movq %%rbp, 0x30(%%rax);	\n\t" \
1437 		"movq %%rdi, 0x28(%%rax);	\n\t" \
1438 		"movq %%rsi, 0x20(%%rax);	\n\t" \
1439 		"movq %%rdx, 0x18(%%rax);	\n\t" \
1440 		"movq %%rcx, 0x10(%%rax);	\n\t" \
1441 		"movq %%rbx, 0x08(%%rax);	\n\t" \
1442 		"popq 0x00(%%rax);		\n\t" \
1443 		"popq %%rbp;			\n\t" \
1444 		: /* No outputs. */ : "a" (frame) \
1445 		  /* No pushal on amd64 so warn gcc about the clobbered registers. */ \
1446 		: "rbx", "rcx", "rdx", "rdi", "rsi", "cc", "memory" \
1447 	)
1448 
1449 
1450 #ifdef __i386__
1451 #define BACKDOOR_OP(op, frame) BACKDOOR_OP_I386(op, frame)
1452 #else
1453 #define BACKDOOR_OP(op, frame) BACKDOOR_OP_AMD64(op, frame)
1454 #endif
1455 
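/*
 * vm_cmd issues a single command ("inl"), vm_ins reads a buffer from
 * the host ("rep insb") and vm_outs writes a buffer to the host
 * ("rep outsb").
 */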
1456 void
1457 vm_cmd(struct vm_backdoor *frame)
1458 {
1459 	BACKDOOR_OP("inl %%dx, %%eax;", frame);
1460 }
1461 
1462 void
1463 vm_ins(struct vm_backdoor *frame)
1464 {
1465 	BACKDOOR_OP("cld;\n\trep insb;", frame);
1466 }
1467 
1468 void
1469 vm_outs(struct vm_backdoor *frame)
1470 {
1471 	BACKDOOR_OP("cld;\n\trep outsb;", frame);
1472 }
1473 
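/*
 * Open an RPC channel of the given protocol (RPCI or TCLO).  On success
 * the host hands back a channel number and two cookies that must
 * accompany every subsequent operation on the channel.
 */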
1474 int
1475 vm_rpc_open(struct vm_rpc *rpc, uint32_t proto)
1476 {
1477 	struct vm_backdoor frame;
1478 
1479 	bzero(&frame, sizeof(frame));
1480 	frame.eax.word      = VM_MAGIC;
1481 	frame.ebx.word      = proto | VM_RPC_FLAG_COOKIE;
1482 	frame.ecx.part.low  = VM_CMD_RPC;
1483 	frame.ecx.part.high = VM_RPC_OPEN;
1484 	frame.edx.part.low  = VM_PORT_CMD;
1485 	frame.edx.part.high = 0;
1486 
1487 	vm_cmd(&frame);
1488 
1489 	if (frame.ecx.part.high != 1 || frame.edx.part.low != 0) {
1490 		/* open-vm-tools retries without VM_RPC_FLAG_COOKIE here. */
1491 		DPRINTF("vmware: open failed, eax=%08x, ecx=%08x, edx=%08x\n",
1492 		    frame.eax.word, frame.ecx.word, frame.edx.word);
1493 		return EIO;
1494 	}
1495 
1496 	rpc->channel = frame.edx.part.high;
1497 	rpc->cookie1 = frame.esi.word;
1498 	rpc->cookie2 = frame.edi.word;
1499 
1500 	return 0;
1501 }
1502 
1503 int
1504 vm_rpc_close(struct vm_rpc *rpc)
1505 {
1506 	struct vm_backdoor frame;
1507 
1508 	bzero(&frame, sizeof(frame));
1509 	frame.eax.word      = VM_MAGIC;
1510 	frame.ebx.word      = 0;
1511 	frame.ecx.part.low  = VM_CMD_RPC;
1512 	frame.ecx.part.high = VM_RPC_CLOSE;
1513 	frame.edx.part.low  = VM_PORT_CMD;
1514 	frame.edx.part.high = rpc->channel;
1515 	frame.edi.word      = rpc->cookie2;
1516 	frame.esi.word      = rpc->cookie1;
1517 
1518 	vm_cmd(&frame);
1519 
1520 	if (frame.ecx.part.high == 0 || frame.ecx.part.low != 0) {
1521 		DPRINTF("vmware: close failed, eax=%08x, ecx=%08x\n",
1522 		    frame.eax.word, frame.ecx.word);
1523 		return EIO;
1524 	}
1525 
1526 	rpc->channel = 0;
1527 	rpc->cookie1 = 0;
1528 	rpc->cookie2 = 0;
1529 
1530 	return 0;
1531 }
1532 
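/*
 * Send a message in two steps: announce the length with
 * VM_RPC_SET_LENGTH on the command port, then push the payload with a
 * single enhanced-RPC "rep outsb" on the RPC data port.  A zero-length
 * send is used as a ping.
 */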
1533 int
1534 vm_rpc_send(const struct vm_rpc *rpc, const uint8_t *buf, uint32_t length)
1535 {
1536 	struct vm_backdoor frame;
1537 
1538 	/* Send the length of the command. */
1539 	bzero(&frame, sizeof(frame));
1540 	frame.eax.word = VM_MAGIC;
1541 	frame.ebx.word = length;
1542 	frame.ecx.part.low  = VM_CMD_RPC;
1543 	frame.ecx.part.high = VM_RPC_SET_LENGTH;
1544 	frame.edx.part.low  = VM_PORT_CMD;
1545 	frame.edx.part.high = rpc->channel;
1546 	frame.esi.word = rpc->cookie1;
1547 	frame.edi.word = rpc->cookie2;
1548 
1549 	vm_cmd(&frame);
1550 
1551 	if ((frame.ecx.part.high & VM_RPC_REPLY_SUCCESS) == 0) {
1552 		DPRINTF("vmware: sending length failed, eax=%08x, ecx=%08x\n",
1553 		    frame.eax.word, frame.ecx.word);
1554 		return EIO;
1555 	}
1556 
1557 	if (length == 0)
1558 		return 0; /* Only need to poke once if command is null. */
1559 
1560 	/* Send the command using enhanced RPC. */
1561 	bzero(&frame, sizeof(frame));
1562 	frame.eax.word = VM_MAGIC;
1563 	frame.ebx.word = VM_RPC_ENH_DATA;
1564 	frame.ecx.word = length;
1565 	frame.edx.part.low  = VM_PORT_RPC;
1566 	frame.edx.part.high = rpc->channel;
1567 	frame.ebp.word = rpc->cookie1;
1568 	frame.edi.word = rpc->cookie2;
1569 #ifdef __amd64__
1570 	frame.esi.quad = (uint64_t)buf;
1571 #else
1572 	frame.esi.word = (uint32_t)buf;
1573 #endif
1574 
1575 	vm_outs(&frame);
1576 
1577 	if (frame.ebx.word != VM_RPC_ENH_DATA) {
1578 		/* open-vm-tools retries on VM_RPC_REPLY_CHECKPOINT */
1579 		DPRINTF("vmware: send failed, ebx=%08x\n", frame.ebx.word);
1580 		return EIO;
1581 	}
1582 
1583 	return 0;
1584 }
1585 
1586 int
1587 vm_rpc_send_str(const struct vm_rpc *rpc, const uint8_t *str)
1588 {
1589 	return vm_rpc_send(rpc, str, strlen(str));
1590 }
1591 
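/*
 * Read a pending message with an enhanced-RPC "rep insb" and then
 * acknowledge it with VM_RPC_GET_END.  The buffer must have room for
 * length + 1 bytes, since the data is NUL-terminated here.
 */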
1592 int
1593 vm_rpc_get_data(const struct vm_rpc *rpc, char *data, uint32_t length,
1594     uint16_t dataid)
1595 {
1596 	struct vm_backdoor frame;
1597 
1598 	/* Get data using enhanced RPC. */
1599 	bzero(&frame, sizeof(frame));
1600 	frame.eax.word      = VM_MAGIC;
1601 	frame.ebx.word      = VM_RPC_ENH_DATA;
1602 	frame.ecx.word      = length;
1603 	frame.edx.part.low  = VM_PORT_RPC;
1604 	frame.edx.part.high = rpc->channel;
1605 	frame.esi.word      = rpc->cookie1;
1606 #ifdef __amd64__
1607 	frame.edi.quad      = (uint64_t)data;
1608 #else
1609 	frame.edi.word      = (uint32_t)data;
1610 #endif
1611 	frame.ebp.word      = rpc->cookie2;
1612 
1613 	vm_ins(&frame);
1614 
1615 	/* NUL-terminate the data */
1616 	data[length] = '\0';
1617 
1618 	if (frame.ebx.word != VM_RPC_ENH_DATA) {
1619 		DPRINTF("vmware: get data failed, ebx=%08x\n",
1620 		    frame.ebx.word);
1621 		return EIO;
1622 	}
1623 
1624 	/* Acknowledge data received. */
1625 	bzero(&frame, sizeof(frame));
1626 	frame.eax.word      = VM_MAGIC;
1627 	frame.ebx.word      = dataid;
1628 	frame.ecx.part.low  = VM_CMD_RPC;
1629 	frame.ecx.part.high = VM_RPC_GET_END;
1630 	frame.edx.part.low  = VM_PORT_CMD;
1631 	frame.edx.part.high = rpc->channel;
1632 	frame.esi.word      = rpc->cookie1;
1633 	frame.edi.word      = rpc->cookie2;
1634 
1635 	vm_cmd(&frame);
1636 
1637 	if (frame.ecx.part.high == 0) {
1638 		DPRINTF("vmware: ack data failed, eax=%08x, ecx=%08x\n",
1639 		    frame.eax.word, frame.ecx.word);
1640 		return EIO;
1641 	}
1642 
1643 	return 0;
1644 }
1645 
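/*
 * Ask the host whether a message is pending on the channel.  Returns
 * its length and data id, or zero for both if there is nothing to
 * receive.
 */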
1646 int
1647 vm_rpc_get_length(const struct vm_rpc *rpc, uint32_t *length, uint16_t *dataid)
1648 {
1649 	struct vm_backdoor frame;
1650 
1651 	bzero(&frame, sizeof(frame));
1652 	frame.eax.word      = VM_MAGIC;
1653 	frame.ebx.word      = 0;
1654 	frame.ecx.part.low  = VM_CMD_RPC;
1655 	frame.ecx.part.high = VM_RPC_GET_LENGTH;
1656 	frame.edx.part.low  = VM_PORT_CMD;
1657 	frame.edx.part.high = rpc->channel;
1658 	frame.esi.word      = rpc->cookie1;
1659 	frame.edi.word      = rpc->cookie2;
1660 
1661 	vm_cmd(&frame);
1662 
1663 	if ((frame.ecx.part.high & VM_RPC_REPLY_SUCCESS) == 0) {
1664 		DPRINTF("vmware: get length failed, eax=%08x, ecx=%08x\n",
1665 		    frame.eax.word, frame.ecx.word);
1666 		return EIO;
1667 	}
1668 	if ((frame.ecx.part.high & VM_RPC_REPLY_DORECV) == 0) {
1669 		*length = 0;
1670 		*dataid = 0;
1671 	} else {
1672 		*length = frame.ebx.word;
1673 		*dataid = frame.edx.part.high;
1674 	}
1675 
1676 	return 0;
1677 }
1678 
1679 int
1680 vm_rpci_response_successful(struct vmt_softc *sc)
1681 {
1682 	return (sc->sc_rpc_buf[0] == '1' && sc->sc_rpc_buf[1] == ' ');
1683 }
1684 
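/*
 * Send a command over a temporary RPCI channel and read the host's
 * reply into sc_rpc_buf for the caller to inspect.
 */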
1685 int
1686 vm_rpc_send_rpci_tx_buf(struct vmt_softc *sc, const uint8_t *buf,
1687     uint32_t length)
1688 {
1689 	struct vm_rpc rpci;
1690 	u_int32_t rlen;
1691 	u_int16_t ack;
1692 	int result = 0;
1693 
1694 	if (vm_rpc_open(&rpci, VM_RPC_OPEN_RPCI) != 0) {
1695 		DPRINTF("%s: rpci channel open failed\n", DEVNAME(sc));
1696 		return EIO;
1697 	}
1698 
1699 	if (vm_rpc_send(&rpci, buf, length) != 0) {
1700 		DPRINTF("%s: unable to send rpci command\n", DEVNAME(sc));
1701 		result = EIO;
1702 		goto out;
1703 	}
1704 
1705 	if (vm_rpc_get_length(&rpci, &rlen, &ack) != 0) {
1706 		DPRINTF("%s: failed to get length of rpci response data\n",
1707 		    DEVNAME(sc));
1708 		result = EIO;
1709 		goto out;
1710 	}
1711 
1712 	if (rlen > 0) {
1713 		if (rlen >= VMT_RPC_BUFLEN) {
1714 			rlen = VMT_RPC_BUFLEN - 1;
1715 		}
1716 
1717 		if (vm_rpc_get_data(&rpci, sc->sc_rpc_buf, rlen, ack) != 0) {
1718 			DPRINTF("%s: failed to get rpci response data\n",
1719 			    DEVNAME(sc));
1720 			result = EIO;
1721 			goto out;
1722 		}
1723 	}
1724 
1725 out:
1726 	if (vm_rpc_close(&rpci) != 0) {
1727 		DPRINTF("%s: unable to close rpci channel\n", DEVNAME(sc));
1728 	}
1729 
1730 	return result;
1731 }
1732 
1733 int
1734 vm_rpc_send_rpci_tx(struct vmt_softc *sc, const char *fmt, ...)
1735 {
1736 	va_list args;
1737 	int len;
1738 
1739 	va_start(args, fmt);
1740 	len = vsnprintf(sc->sc_rpc_buf, VMT_RPC_BUFLEN, fmt, args);
1741 	va_end(args);
1742 
1743 	if (len >= VMT_RPC_BUFLEN) {
1744 		DPRINTF("%s: rpci command didn't fit in buffer\n", DEVNAME(sc));
1745 		return EIO;
1746 	}
1747 
1748 	return vm_rpc_send_rpci_tx_buf(sc, sc->sc_rpc_buf, len);
1749 }
1750 
1751 #if 0
1752 	struct vm_backdoor frame;
1753 
1754 	bzero(&frame, sizeof(frame));
1755 
1756 	frame.eax.word = VM_MAGIC;
1757 	frame.ecx.part.low = VM_CMD_GET_VERSION;
1758 	frame.edx.part.low  = VM_PORT_CMD;
1759 
1760 	printf("\n");
1761 	printf("eax 0x%08x\n", frame.eax.word);
1762 	printf("ebx 0x%08x\n", frame.ebx.word);
1763 	printf("ecx 0x%08x\n", frame.ecx.word);
1764 	printf("edx 0x%08x\n", frame.edx.word);
1765 	printf("ebp 0x%08x\n", frame.ebp.word);
1766 	printf("edi 0x%08x\n", frame.edi.word);
1767 	printf("esi 0x%08x\n", frame.esi.word);
1768 
1769 	vm_cmd(&frame);
1770 
1771 	printf("-\n");
1772 	printf("eax 0x%08x\n", frame.eax.word);
1773 	printf("ebx 0x%08x\n", frame.ebx.word);
1774 	printf("ecx 0x%08x\n", frame.ecx.word);
1775 	printf("edx 0x%08x\n", frame.edx.word);
1776 	printf("ebp 0x%08x\n", frame.ebp.word);
1777 	printf("edi 0x%08x\n", frame.edi.word);
1778 	printf("esi 0x%08x\n", frame.esi.word);
1779 #endif
1780 
1781 /*
1782  * Notes on tracing backdoor activity in vmware-guestd:
1783  *
1784  * - Find the addresses of the inl / rep insb / rep outsb
1785  *   instructions used to perform backdoor operations.
1786  *   One way to do this is to disassemble vmware-guestd:
1787  *
1788  *   $ objdump -S /emul/freebsd/sbin/vmware-guestd > vmware-guestd.S
1789  *
1790  *   and search for '<tab>in ' in the resulting file.  The rep insb and
1791  *   rep outsb code is directly below that.
1792  *
1793  * - Run vmware-guestd under gdb, setting up breakpoints as follows:
1794  *   (the addresses shown here are the ones from VMware-server-1.0.10-203137,
1795  *   the last version that actually works in FreeBSD emulation on OpenBSD)
1796  *
1797  * break *0x805497b   (address of 'in' instruction)
1798  * commands 1
1799  * silent
1800  * echo INOUT\n
1801  * print/x $ecx
1802  * print/x $ebx
1803  * print/x $edx
1804  * continue
1805  * end
1806  * break *0x805497c   (address of instruction after 'in')
1807  * commands 2
1808  * silent
1809  * echo ===\n
1810  * print/x $ecx
1811  * print/x $ebx
1812  * print/x $edx
1813  * echo \n
1814  * continue
1815  * end
1816  * break *0x80549b7   (address of instruction before 'rep insb')
1817  * commands 3
1818  * silent
1819  * set variable $inaddr = $edi
1820  * set variable $incount = $ecx
1821  * continue
1822  * end
1823  * break *0x80549ba   (address of instruction after 'rep insb')
1824  * commands 4
1825  * silent
1826  * echo IN\n
1827  * print $incount
1828  * x/s $inaddr
1829  * echo \n
1830  * continue
1831  * end
1832  * break *0x80549fb    (address of instruction before 'rep outsb')
1833  * commands 5
1834  * silent
1835  * echo OUT\n
1836  * print $ecx
1837  * x/s $esi
1838  * echo \n
1839  * continue
1840  * end
1841  *
1842  * This will produce a log of the backdoor operations, including the
1843  * data sent and received and the relevant register values.  You can then
1844  * match the register values to the various constants in this file.
1845  */
1846