/* $NetBSD: vmtvar.h,v 1.2 2021/03/27 21:23:14 ryo Exp $ */
/* NetBSD: vmt.c,v 1.15 2016/11/10 03:32:04 ozaki-r Exp */
/* $OpenBSD: vmt.c,v 1.11 2011/01/27 21:29:25 dtucker Exp $ */

/*
 * Copyright (c) 2007 David Crawshaw <david@zentus.com>
 * Copyright (c) 2008 David Gwynne <dlg@openbsd.org>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#ifndef _DEV_VMT_VMTVAR_H_
#define _DEV_VMT_VMTVAR_H_

#include <sys/uuid.h>
#include <dev/sysmon/sysmonvar.h>

/* A register frame. */
/* XXX 'volatile' as a workaround because BACKDOOR_OP is likely broken */
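/*
 * Field names follow the x86 GPRs the frame maps onto; on aarch64 the
 * same seven slots are loaded into x0-x6 instead (see BACKDOOR_OP_AARCH64).
 */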
struct vm_backdoor {
	volatile register_t eax;
	volatile register_t ebx;
	volatile register_t ecx;
	volatile register_t edx;
	volatile register_t esi;
	volatile register_t edi;
	volatile register_t ebp;
};

#define VM_REG_LOW_MASK		__BITS(15,0)
#define VM_REG_HIGH_MASK	__BITS(31,16)
#define VM_REG_WORD_MASK	__BITS(31,0)
#define VM_REG_CMD(hi, low)	\
	(__SHIFTIN((hi), VM_REG_HIGH_MASK) | __SHIFTIN((low), VM_REG_LOW_MASK))
#define VM_REG_CMD_RPC(cmd)	VM_REG_CMD((cmd), VM_CMD_RPC)
#define VM_REG_PORT_CMD(cmd)	VM_REG_CMD((cmd), VM_PORT_CMD)
#define VM_REG_PORT_RPC(cmd)	VM_REG_CMD((cmd), VM_PORT_RPC)
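
/*
 * Example of composing a command word (a sketch; VM_CMD_GET_VERSION and
 * friends are defined with the companion register definitions, not here):
 *
 *	frame.ecx = VM_REG_CMD(0, VM_CMD_GET_VERSION);
 *	frame.edx = VM_REG_PORT_CMD(0);
 *
 * i.e. the 16-bit command in the low word of %ecx and the command port
 * in the low word of %edx.  vmt_hvcall() below uses exactly this pattern.
 */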

/* RPC context. */
struct vm_rpc {
	uint16_t channel;
	uint32_t cookie1;
	uint32_t cookie2;
};

struct vmt_event {
	struct sysmon_pswitch	ev_smpsw;
	int			ev_code;
};

struct vmt_softc {
	device_t		sc_dev;

	struct sysctllog	*sc_log;
	struct vm_rpc		sc_tclo_rpc;	/* host-to-guest (TCLO) channel */
	bool			sc_tclo_rpc_open;
	char			*sc_rpc_buf;	/* VMT_RPC_BUFLEN-sized buffer */
	int			sc_rpc_error;
	int			sc_tclo_ping;
	int			sc_set_guest_os;
#define VMT_RPC_BUFLEN			256

	struct callout		sc_tick;
	struct callout		sc_tclo_tick;

#define VMT_CLOCK_SYNC_PERIOD_SECONDS 60
	int			sc_clock_sync_period_seconds;
	struct callout		sc_clock_sync_tick;

	struct vmt_event	sc_ev_power;
	struct vmt_event	sc_ev_reset;
	struct vmt_event	sc_ev_sleep;
	bool			sc_smpsw_valid;	/* pswitch events registered */

	char			sc_hostname[MAXHOSTNAMELEN];
	char			sc_uuid[_UUID_STR_LEN];
};

bool vmt_probe(void);
void vmt_common_attach(struct vmt_softc *);
int vmt_common_detach(struct vmt_softc *);

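/*
 * Execute one backdoor operation: load the saved frame into the CPU
 * registers, run "op" (an I/O access that the hypervisor intercepts),
 * then store the resulting registers back into the frame.  The frame
 * pointer itself is kept on the stack across the call (the xchg trick),
 * since every general-purpose register carries protocol data.
 */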
#define BACKDOOR_OP_I386(op, frame)		\
	__asm__ __volatile__ (			\
		"pushal;"			\
		"pushl %%eax;"			\
		"movl 0x18(%%eax), %%ebp;"	\
		"movl 0x14(%%eax), %%edi;"	\
		"movl 0x10(%%eax), %%esi;"	\
		"movl 0x0c(%%eax), %%edx;"	\
		"movl 0x08(%%eax), %%ecx;"	\
		"movl 0x04(%%eax), %%ebx;"	\
		"movl 0x00(%%eax), %%eax;"	\
		op				\
		"xchgl %%eax, 0x00(%%esp);"	\
		"movl %%ebp, 0x18(%%eax);"	\
		"movl %%edi, 0x14(%%eax);"	\
		"movl %%esi, 0x10(%%eax);"	\
		"movl %%edx, 0x0c(%%eax);"	\
		"movl %%ecx, 0x08(%%eax);"	\
		"movl %%ebx, 0x04(%%eax);"	\
		"popl 0x00(%%eax);"		\
		"popal;"			\
		:				\
		:"a"(frame)			\
	)

#define BACKDOOR_OP_AMD64(op, frame)		\
	__asm__ __volatile__ (			\
		"pushq %%rbp;			\n\t" \
		"pushq %%rax;			\n\t" \
		"movq 0x30(%%rax), %%rbp;	\n\t" \
		"movq 0x28(%%rax), %%rdi;	\n\t" \
		"movq 0x20(%%rax), %%rsi;	\n\t" \
		"movq 0x18(%%rax), %%rdx;	\n\t" \
		"movq 0x10(%%rax), %%rcx;	\n\t" \
		"movq 0x08(%%rax), %%rbx;	\n\t" \
		"movq 0x00(%%rax), %%rax;	\n\t" \
		op				"\n\t" \
		"xchgq %%rax, 0x00(%%rsp);	\n\t" \
		"movq %%rbp, 0x30(%%rax);	\n\t" \
		"movq %%rdi, 0x28(%%rax);	\n\t" \
		"movq %%rsi, 0x20(%%rax);	\n\t" \
		"movq %%rdx, 0x18(%%rax);	\n\t" \
		"movq %%rcx, 0x10(%%rax);	\n\t" \
		"movq %%rbx, 0x08(%%rax);	\n\t" \
		"popq 0x00(%%rax);		\n\t" \
		"popq %%rbp;			\n\t" \
		: /* No outputs. */ \
		: "a" (frame) \
		  /* No pushal on amd64 so warn gcc about the clobbered registers. */\
		: "rbx", "rcx", "rdx", "rdi", "rsi", "cc", "memory" \
	)

#define X86_IO_MAGIC		0x86	/* magic for upper 32bit of x7 */
#define X86_IO_W7_SIZE_MASK	__BITS(1, 0)
#define X86_IO_W7_SIZE(n)	__SHIFTIN((n), X86_IO_W7_SIZE_MASK)
#define X86_IO_W7_DIR		__BIT(2)
#define X86_IO_W7_WITH		__BIT(3)
#define X86_IO_W7_STR		__BIT(4)
#define X86_IO_W7_DF		__BIT(5)
#define X86_IO_W7_IMM_MASK	__BITS(12, 5)
#define X86_IO_W7_IMM(imm)	__SHIFTIN((imm), X86_IO_W7_IMM_MASK)
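
/*
 * aarch64 has no I/O port instruction for the hypervisor to intercept.
 * Instead the x86 pseudo-port operation is described in x7: the W7 bits
 * above encode the operation, X86_IO_MAGIC sits in bits 63:32, and the
 * (normally harmless) read of MDCCSR_EL0 is what traps to the hypervisor.
 */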
#define BACKDOOR_OP_AARCH64(op, frame)		\
	__asm__ __volatile__ (			\
		"ldp x0, x1, [%0, 8 * 0];	\n\t" \
		"ldp x2, x3, [%0, 8 * 2];	\n\t" \
		"ldp x4, x5, [%0, 8 * 4];	\n\t" \
		"ldr x6,     [%0, 8 * 6];	\n\t" \
		"mov x7, %1;			\n\t" \
		"movk x7, %2, lsl #32;		\n\t" \
		"mrs xzr, mdccsr_el0;		\n\t" \
		"stp x0, x1, [%0, 8 * 0];	\n\t" \
		"stp x2, x3, [%0, 8 * 2];	\n\t" \
		"stp x4, x5, [%0, 8 * 4];	\n\t" \
		"str x6,     [%0, 8 * 6];	\n\t" \
		: /* No outputs. */ \
		: "r" (frame), \
		  "r" (op), \
		  "i" (X86_IO_MAGIC) \
		: "x0", "x1", "x2", "x3", "x4", "x5", "x6", "x7", "memory" \
	)

#if defined(__i386__)
#define BACKDOOR_OP(op, frame) BACKDOOR_OP_I386(op, frame)
#elif defined(__amd64__)
#define BACKDOOR_OP(op, frame) BACKDOOR_OP_AMD64(op, frame)
#elif defined(__aarch64__)
#define BACKDOOR_OP(op, frame) BACKDOOR_OP_AARCH64(op, frame)
#endif

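/*
 * "op" is an instruction string on x86 but a W7 control word on aarch64;
 * the BACKDOOR_OP_CMD/_IN/_OUT definitions below hide that difference.
 */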
#if defined(__i386__) || defined(__amd64__)
#define BACKDOOR_OP_CMD	"inl %%dx, %%eax;"
#define BACKDOOR_OP_IN	"cld;\n\trep insb;"
#define BACKDOOR_OP_OUT	"cld;\n\trep outsb;"
#elif defined(__aarch64__)
#define BACKDOOR_OP_CMD	(X86_IO_W7_WITH | X86_IO_W7_DIR | X86_IO_W7_SIZE(2))
#define BACKDOOR_OP_IN	(X86_IO_W7_WITH | X86_IO_W7_STR | X86_IO_W7_DIR)
#define BACKDOOR_OP_OUT	(X86_IO_W7_WITH | X86_IO_W7_STR)
#endif

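/*
 * Issue backdoor command "cmd" and copy the low 32 bits of the six
 * result registers (%eax, %ebx, %ecx, %edx, %esi, %edi) into regs[].
 */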
static __inline void
vmt_hvcall(uint8_t cmd, u_int regs[6])
{
	struct vm_backdoor frame;

	memset(&frame, 0, sizeof(frame));
	frame.eax = VM_MAGIC;
	frame.ebx = UINT_MAX;
	frame.ecx = VM_REG_CMD(0, cmd);
	frame.edx = VM_REG_PORT_CMD(0);

	BACKDOOR_OP(BACKDOOR_OP_CMD, &frame);

	regs[0] = __SHIFTOUT(frame.eax, VM_REG_WORD_MASK);
	regs[1] = __SHIFTOUT(frame.ebx, VM_REG_WORD_MASK);
	regs[2] = __SHIFTOUT(frame.ecx, VM_REG_WORD_MASK);
	regs[3] = __SHIFTOUT(frame.edx, VM_REG_WORD_MASK);
	regs[4] = __SHIFTOUT(frame.esi, VM_REG_WORD_MASK);
	regs[5] = __SHIFTOUT(frame.edi, VM_REG_WORD_MASK);
}
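
/*
 * Example: how a caller such as vmt_probe() can detect the hypervisor.
 * A minimal sketch; VM_CMD_GET_VERSION and VM_MAGIC are assumed to come
 * from the companion register definitions, not this header:
 *
 *	u_int regs[6];
 *
 *	vmt_hvcall(VM_CMD_GET_VERSION, regs);
 *	if (regs[1] != VM_MAGIC)
 *		return false;
 *
 * A servicing hypervisor echoes the magic back in the %ebx slot.
 */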

#endif /* _DEV_VMT_VMTVAR_H_ */