/*	$NetBSD: nvmm_internal.h,v 1.21 2022/09/13 20:10:04 riastradh Exp $	*/

/*
 * Copyright (c) 2018-2020 Maxime Villard, m00nbsd.net
 * All rights reserved.
 *
 * This code is part of the NVMM hypervisor.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#ifndef _NVMM_INTERNAL_H_
#define _NVMM_INTERNAL_H_

#include <sys/types.h>

#include <sys/atomic.h>
#include <sys/lwp.h>
#include <sys/mutex.h>
#include <sys/rwlock.h>
#include <sys/sched.h>

#include <dev/nvmm/nvmm.h>

struct uvm_object;
struct vmspace;

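/*
 * Static limits: machines per host, VCPUs and host mappings per machine,
 * and guest physical memory per machine (128ULL * (1 << 30) = 128 GiB).
 */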
#define NVMM_MAX_MACHINES	128
#define NVMM_MAX_VCPUS		256
#define NVMM_MAX_HMAPPINGS	32
#define NVMM_MAX_RAM		(128ULL * (1 << 30))

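/* An owner: the process (identified by PID) that a machine belongs to. */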
struct nvmm_owner {
	pid_t pid;
};

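/* A virtual CPU. The backend keeps its own per-VCPU state in 'cpudata'. */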
struct nvmm_cpu {
	/* Shared. */
	bool present;
	nvmm_cpuid_t cpuid;
	kmutex_t lock;

	/* Comm page. */
	struct nvmm_comm_page *comm;

	/* Last host CPU on which the VCPU ran. */
	int hcpu_last;

	/* Implementation-specific. */
	void *cpudata;
};

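/*
 * A host mapping: a range of the owner's virtual address space (hva, size),
 * backed by a dedicated UVM object so that it can also be mapped into the
 * guest.
 */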
struct nvmm_hmapping {
	bool present;
	uintptr_t hva;
	size_t size;
	struct uvm_object *uobj;
};

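/*
 * A virtual machine: its guest physical address space (vm, gpa_begin..gpa_end),
 * comm pages, host mappings and VCPUs.
 */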
struct nvmm_machine {
	bool present;
	nvmm_machid_t machid;
	time_t time;
	struct nvmm_owner *owner;
	krwlock_t lock;

	/* Comm */
	struct uvm_object *commuobj;

	/* Kernel */
	struct vmspace *vm;
	gpaddr_t gpa_begin;
	gpaddr_t gpa_end;

	/* Host Mappings */
	struct nvmm_hmapping hmap[NVMM_MAX_HMAPPINGS];

	/* CPU */
	volatile unsigned int ncpus;
	struct nvmm_cpu cpus[NVMM_MAX_VCPUS];

	/* Implementation-specific */
	void *machdata;
};

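/*
 * The backend interface. Each hardware backend (x86 SVM or VMX below)
 * provides one 'nvmm_impl' filled with its callbacks; the machine-independent
 * code calls through it for machine and VCPU operations.
 */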
struct nvmm_impl {
	const char *name;
	bool (*ident)(void);
	void (*init)(void);
	void (*fini)(void);
	void (*capability)(struct nvmm_capability *);
	void (*suspend_interrupt)(void);
	void (*suspend)(void);
	void (*resume)(void);

	size_t mach_conf_max;
	const size_t *mach_conf_sizes;

	size_t vcpu_conf_max;
	const size_t *vcpu_conf_sizes;

	size_t state_size;

	void (*machine_create)(struct nvmm_machine *);
	void (*machine_destroy)(struct nvmm_machine *);
	int (*machine_configure)(struct nvmm_machine *, uint64_t, void *);
	void (*machine_suspend)(struct nvmm_machine *);
	void (*machine_resume)(struct nvmm_machine *);

	int (*vcpu_create)(struct nvmm_machine *, struct nvmm_cpu *);
	void (*vcpu_destroy)(struct nvmm_machine *, struct nvmm_cpu *);
	int (*vcpu_configure)(struct nvmm_cpu *, uint64_t, void *);
	void (*vcpu_setstate)(struct nvmm_cpu *);
	void (*vcpu_getstate)(struct nvmm_cpu *);
	int (*vcpu_inject)(struct nvmm_cpu *);
	int (*vcpu_run)(struct nvmm_machine *, struct nvmm_cpu *,
	    struct nvmm_vcpu_exit *);
	void (*vcpu_suspend)(struct nvmm_machine *, struct nvmm_cpu *);
	void (*vcpu_resume)(struct nvmm_machine *, struct nvmm_cpu *);
};

#if defined(__x86_64__)
extern const struct nvmm_impl nvmm_x86_svm;
extern const struct nvmm_impl nvmm_x86_vmx;
#endif
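
/*
 * Illustrative only: a backend exports a filled-in nvmm_impl, in the same
 * way nvmm_x86_svm and nvmm_x86_vmx do above.  Every "foo_*" name below is
 * hypothetical, not part of the driver:
 *
 *	const struct nvmm_impl nvmm_foo = {
 *		.name = "foo",
 *		.ident = foo_ident,
 *		.init = foo_init,
 *		.fini = foo_fini,
 *		.capability = foo_capability,
 *		.state_size = sizeof(struct foo_vcpu_state),
 *		.machine_create = foo_machine_create,
 *		.machine_destroy = foo_machine_destroy,
 *		.vcpu_create = foo_vcpu_create,
 *		.vcpu_destroy = foo_vcpu_destroy,
 *		.vcpu_setstate = foo_vcpu_setstate,
 *		.vcpu_getstate = foo_vcpu_getstate,
 *		.vcpu_inject = foo_vcpu_inject,
 *		.vcpu_run = foo_vcpu_run,
 *	};
 */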

/* Set while the host is suspending; checked by the VCPU run loops. */
extern volatile bool nvmm_suspending;

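/*
 * Decide whether a VCPU run loop must return to userland: a kernel
 * preemption is pending, the LWP has userret work to do, the owner asked
 * the VCPU to stop through the comm page, or the host is suspending.
 * Sets exit->reason accordingly.
 */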
static inline bool
nvmm_return_needed(struct nvmm_cpu *vcpu, struct nvmm_vcpu_exit *exit)
{

	if (preempt_needed()) {
		exit->reason = NVMM_VCPU_EXIT_NONE;
		return true;
	}
	if (curlwp->l_flag & LW_USERRET) {
		exit->reason = NVMM_VCPU_EXIT_NONE;
		return true;
	}
	if (vcpu->comm->stop) {
		exit->reason = NVMM_VCPU_EXIT_STOPPED;
		return true;
	}
	if (atomic_load_relaxed(&nvmm_suspending)) {
		exit->reason = NVMM_VCPU_EXIT_NONE;
		return true;
	}

	return false;
}

#endif /* _NVMM_INTERNAL_H_ */