/* $NetBSD: vmtvar.h,v 1.1 2020/10/27 08:57:11 ryo Exp $ */
/* NetBSD: vmt.c,v 1.15 2016/11/10 03:32:04 ozaki-r Exp */
/* $OpenBSD: vmt.c,v 1.11 2011/01/27 21:29:25 dtucker Exp $ */

/*
 * Copyright (c) 2007 David Crawshaw <david@zentus.com>
 * Copyright (c) 2008 David Gwynne <dlg@openbsd.org>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#ifndef _DEV_VMT_VMTVAR_H_
#define _DEV_VMT_VMTVAR_H_

#include <sys/uuid.h>
#include <dev/sysmon/sysmonvar.h>

/* XXX: depends on little-endian byte order (part.low overlays the low half) */
/* A register. */
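/*
 * The backdoor ABI is expressed in terms of 32-bit x86 registers; on
 * 64-bit platforms the full native register is carried as well
 * ('words'/'quad'), e.g. so buffer addresses handed to the string
 * (rep ins/outs) forms of the backdoor are not truncated to 32 bits.
 */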
union vm_reg {
	struct {
		uint16_t low;
		uint16_t high;
	} part;
	uint32_t word;
#if defined(__amd64__) || defined(__aarch64__)
	struct {
		uint32_t low;
		uint32_t high;
	} words;
	uint64_t quad;
#endif
} __packed;

/* A register frame. */
/* XXX 'volatile' as a workaround because BACKDOOR_OP is likely broken */
struct vm_backdoor {
	volatile union vm_reg eax;
	volatile union vm_reg ebx;
	volatile union vm_reg ecx;
	volatile union vm_reg edx;
	volatile union vm_reg esi;
	volatile union vm_reg edi;
	volatile union vm_reg ebp;
} __packed;

/* RPC context. */
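/*
 * The channel number and the two cookie values returned by the hypervisor
 * when an RPC channel is opened; later operations on the channel pass
 * them back to identify it.
 */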
struct vm_rpc {
	uint16_t channel;
	uint32_t cookie1;
	uint32_t cookie2;
};

struct vmt_event {
	struct sysmon_pswitch	ev_smpsw;
	int			ev_code;
};

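/*
 * Per-instance state: the TCLO RPC channel and its buffer, the periodic
 * callouts (tick, TCLO poll, clock sync), the sysmon power-switch events,
 * and the cached hostname and UUID strings.
 */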
struct vmt_softc {
	device_t		sc_dev;

	struct sysctllog	*sc_log;
	struct vm_rpc		sc_tclo_rpc;
	bool			sc_tclo_rpc_open;
	char			*sc_rpc_buf;
	int			sc_rpc_error;
	int			sc_tclo_ping;
	int			sc_set_guest_os;
#define VMT_RPC_BUFLEN			256

	struct callout		sc_tick;
	struct callout		sc_tclo_tick;

#define VMT_CLOCK_SYNC_PERIOD_SECONDS 60
	int			sc_clock_sync_period_seconds;
	struct callout		sc_clock_sync_tick;

	struct vmt_event	sc_ev_power;
	struct vmt_event	sc_ev_reset;
	struct vmt_event	sc_ev_sleep;
	bool			sc_smpsw_valid;

	char			sc_hostname[MAXHOSTNAMELEN];
	char			sc_uuid[_UUID_STR_LEN];
};

bool vmt_probe(void);
void vmt_common_attach(struct vmt_softc *);
int vmt_common_detach(struct vmt_softc *);

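/*
 * x86 backdoor dispatch: the frame's seven vm_regs are loaded into
 * %eax..%ebp (the frame pointer itself arrives in %eax, so it is saved on
 * the stack first), 'op' is the port I/O instruction that traps to the
 * hypervisor, and the resulting register values are written back into the
 * frame.  pushal/popal (or the explicit clobber list on amd64) keep the
 * caller's registers intact.
 */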
#define BACKDOOR_OP_I386(op, frame)		\
	__asm__ __volatile__ (			\
		"pushal;"			\
		"pushl %%eax;"			\
		"movl 0x18(%%eax), %%ebp;"	\
		"movl 0x14(%%eax), %%edi;"	\
		"movl 0x10(%%eax), %%esi;"	\
		"movl 0x0c(%%eax), %%edx;"	\
		"movl 0x08(%%eax), %%ecx;"	\
		"movl 0x04(%%eax), %%ebx;"	\
		"movl 0x00(%%eax), %%eax;"	\
		op				\
		"xchgl %%eax, 0x00(%%esp);"	\
		"movl %%ebp, 0x18(%%eax);"	\
		"movl %%edi, 0x14(%%eax);"	\
		"movl %%esi, 0x10(%%eax);"	\
		"movl %%edx, 0x0c(%%eax);"	\
		"movl %%ecx, 0x08(%%eax);"	\
		"movl %%ebx, 0x04(%%eax);"	\
		"popl 0x00(%%eax);"		\
		"popal;"			\
		:				\
		:"a"(frame)			\
	)

#define BACKDOOR_OP_AMD64(op, frame)		\
	__asm__ __volatile__ (			\
		"pushq %%rbp;			\n\t" \
		"pushq %%rax;			\n\t" \
		"movq 0x30(%%rax), %%rbp;	\n\t" \
		"movq 0x28(%%rax), %%rdi;	\n\t" \
		"movq 0x20(%%rax), %%rsi;	\n\t" \
		"movq 0x18(%%rax), %%rdx;	\n\t" \
		"movq 0x10(%%rax), %%rcx;	\n\t" \
		"movq 0x08(%%rax), %%rbx;	\n\t" \
		"movq 0x00(%%rax), %%rax;	\n\t" \
		op				"\n\t" \
		"xchgq %%rax, 0x00(%%rsp);	\n\t" \
		"movq %%rbp, 0x30(%%rax);	\n\t" \
		"movq %%rdi, 0x28(%%rax);	\n\t" \
		"movq %%rsi, 0x20(%%rax);	\n\t" \
		"movq %%rdx, 0x18(%%rax);	\n\t" \
		"movq %%rcx, 0x10(%%rax);	\n\t" \
		"movq %%rbx, 0x08(%%rax);	\n\t" \
		"popq 0x00(%%rax);		\n\t" \
		"popq %%rbp;			\n\t" \
		: /* No outputs. */ \
		: "a" (frame) \
		  /* No pushal on amd64 so warn gcc about the clobbered registers. */\
		: "rbx", "rcx", "rdx", "rdi", "rsi", "cc", "memory" \
	)

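/*
 * AArch64 backdoor: VMware emulates the x86 I/O-port interface.  x7 holds
 * a descriptor of the x86 instruction being emulated (transfer size,
 * direction, REP/string form, DF, and an optional immediate operand),
 * with X86_IO_MAGIC placed in the upper 32 bits of x7; the read of
 * MDCCSR_EL0 is what actually traps to the hypervisor.
 */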
#define X86_IO_MAGIC		0x86	/* magic for upper 32bit of x7 */
#define X86_IO_W7_SIZE_MASK	__BITS(1, 0)
#define X86_IO_W7_SIZE(n)	__SHIFTIN((n), X86_IO_W7_SIZE_MASK)
#define X86_IO_W7_DIR		__BIT(2)
#define X86_IO_W7_WITH		__BIT(3)
#define X86_IO_W7_STR		__BIT(4)
#define X86_IO_W7_DF		__BIT(5)
#define X86_IO_W7_IMM_MASK	__BITS(12, 5)
#define X86_IO_W7_IMM(imm)	__SHIFTIN((imm), X86_IO_W7_IMM_MASK)
#define BACKDOOR_OP_AARCH64(op, frame)		\
	__asm__ __volatile__ (			\
		"ldp x0, x1, [%0, 8 * 0];	\n\t" \
		"ldp x2, x3, [%0, 8 * 2];	\n\t" \
		"ldp x4, x5, [%0, 8 * 4];	\n\t" \
		"ldr x6,     [%0, 8 * 6];	\n\t" \
		"mov x7, %1			\n\t" \
		"movk x7, %2, lsl #32;		\n\t" \
		"mrs xzr, mdccsr_el0;		\n\t" \
		"stp x0, x1, [%0, 8 * 0];	\n\t" \
		"stp x2, x3, [%0, 8 * 2];	\n\t" \
		"stp x4, x5, [%0, 8 * 4];	\n\t" \
		"str x6,     [%0, 8 * 6];	\n\t" \
		: /* No outputs. */ \
		: "r" (frame), \
		  "r" (op), \
		  "i" (X86_IO_MAGIC) \
		: "x0", "x1", "x2", "x3", "x4", "x5", "x6", "x7", "memory" \
	)

#if defined(__i386__)
#define BACKDOOR_OP(op, frame) BACKDOOR_OP_I386(op, frame)
#elif defined(__amd64__)
#define BACKDOOR_OP(op, frame) BACKDOOR_OP_AMD64(op, frame)
#elif defined(__aarch64__)
#define BACKDOOR_OP(op, frame) BACKDOOR_OP_AARCH64(op, frame)
#endif

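/*
 * The 'op' argument to BACKDOOR_OP: on x86 it is the literal instruction
 * dropped into the middle of the asm sequence (a single 32-bit port read
 * for commands, rep insb/outsb for the bulk RPC data transfers); on
 * aarch64 it is the w7 operation word describing the equivalent emulated
 * x86 I/O instruction.
 */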
#if defined(__i386__) || defined(__amd64__)
#define BACKDOOR_OP_CMD	"inl %%dx, %%eax;"
#define BACKDOOR_OP_IN	"cld;\n\trep insb;"
#define BACKDOOR_OP_OUT	"cld;\n\trep outsb;"
#elif defined(__aarch64__)
#define BACKDOOR_OP_CMD	(X86_IO_W7_WITH | X86_IO_W7_DIR | X86_IO_W7_SIZE(2))
#define BACKDOOR_OP_IN	(X86_IO_W7_WITH | X86_IO_W7_STR | X86_IO_W7_DIR)
#define BACKDOOR_OP_OUT	(X86_IO_W7_WITH | X86_IO_W7_STR)
#endif

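/*
 * Issue a single backdoor command: VM_MAGIC goes in eax, the command in
 * the low word of ecx and the command port in the low word of edx; all
 * six result registers are copied out for the caller to inspect.
 */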
static __inline void
vmt_hvcall(uint8_t cmd, u_int regs[6])
{
	struct vm_backdoor frame;

	memset(&frame, 0, sizeof(frame));
	frame.eax.word = VM_MAGIC;
	frame.ebx.word = UINT_MAX;
	frame.ecx.part.low = cmd;
	frame.edx.part.low = VM_PORT_CMD;

	BACKDOOR_OP(BACKDOOR_OP_CMD, &frame);

	regs[0] = frame.eax.word;
	regs[1] = frame.ebx.word;
	regs[2] = frame.ecx.word;
	regs[3] = frame.edx.word;
	regs[4] = frame.esi.word;
	regs[5] = frame.edi.word;
}
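
/*
 * Usage sketch: detecting the hypervisor by issuing the "get version"
 * command.  VM_CMD_GET_VERSION is assumed to be the GETVERSION command
 * number from vmtreg.h; on a real VMware backdoor, ebx echoes VM_MAGIC
 * and eax returns something other than 0xffffffff, so a probe along the
 * lines of vmt_probe() might look like:
 *
 *	u_int regs[6];
 *
 *	vmt_hvcall(VM_CMD_GET_VERSION, regs);
 *	if (regs[0] == 0xffffffff || regs[1] != VM_MAGIC)
 *		return false;	(no VMware backdoor present)
 *	return true;
 */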

#endif /* _DEV_VMT_VMTVAR_H_ */