/*	$NetBSD: gdt.c,v 1.48 2022/08/20 23:48:50 riastradh Exp $	*/

/*
 * Copyright (c) 1996, 1997, 2009 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by John T. Kohl, by Charles M. Hannum, and by Andrew Doran.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Modified to deal with variable-length entries for NetBSD/x86_64 by
 * fvdl@wasabisystems.com, May 2001.
 * XXX this file should be shared with the i386 code; the differences
 * can be hidden in macros.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: gdt.c,v 1.48 2022/08/20 23:48:50 riastradh Exp $");

#include "opt_multiprocessor.h"
#include "opt_xen.h"
#include "opt_user_ldt.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/mutex.h>
#include <sys/cpu.h>

#include <uvm/uvm.h>

#include <machine/gdt.h>
#include <machine/pmap_private.h>

#ifdef XENPV
#include <xen/hypervisor.h>
#endif

#define NSLOTS(sz)	\
	(((sz) - DYNSEL_START) / sizeof(struct sys_segment_descriptor))
#define NDYNSLOTS	NSLOTS(MAXGDTSIZ)
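
/*
 * Sizing sketch (illustrative; the real constants live in segments.h):
 * long-mode system descriptors are 16 bytes, i.e. two 8-byte GDT words
 * each, so a GDT of MAXGDTSIZ bytes whose static part ends at
 * DYNSEL_START provides (MAXGDTSIZ - DYNSEL_START) / 16 dynamic slots.
 */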

typedef struct {
	bool busy[NDYNSLOTS];
	size_t nslots;
} gdt_bitmap_t;

/* size of GDT in bytes */
#ifdef XENPV
const size_t gdt_size = FIRST_RESERVED_GDT_BYTE;
#else
const size_t gdt_size = MAXGDTSIZ;
#endif

/* bitmap of busy slots */
static gdt_bitmap_t gdt_bitmap;

#if defined(USER_LDT) || !defined(XENPV)
static void set_sys_gdt(int, void *, size_t, int, int, int);
#endif

void
update_descriptor(void *tp, void *ep)
{
	uint64_t *table, *entry;

	table = tp;
	entry = ep;

#ifndef XENPV
	*table = *entry;
#else
	paddr_t pa;

	if (!pmap_extract_ma(pmap_kernel(), (vaddr_t)table, &pa) ||
	    HYPERVISOR_update_descriptor(pa, *entry))
		panic("HYPERVISOR_update_descriptor failed");
#endif
}
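
/*
 * Usage sketch (illustrative): callers hand update_descriptor() one
 * 8-byte GDT word at a time, as set_sys_gdt() does below:
 *
 *	uint64_t word = ...;	(encoded descriptor word)
 *	update_descriptor(&ci->ci_gdt[idx], &word);
 *
 * On native hardware this is a plain store; under XENPV the slot is
 * translated to a machine address and updated via hypercall, since
 * Xen keeps the guest's GDT pages read-only.
 */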

#if defined(USER_LDT) || !defined(XENPV)
/*
 * Called on a newly-allocated GDT slot, so no race between CPUs.
 */
static void
set_sys_gdt(int slot, void *base, size_t limit, int type, int dpl, int gran)
{
	union {
		struct sys_segment_descriptor sd;
		uint64_t bits[2];
	} d;
	CPU_INFO_ITERATOR cii;
	struct cpu_info *ci;
	int idx;

	set_sys_segment(&d.sd, base, limit, type, dpl, gran);
	idx = IDXSEL(GDYNSEL(slot, SEL_KPL));
	for (CPU_INFO_FOREACH(cii, ci)) {
		KASSERT(ci->ci_gdt != NULL);
		update_descriptor(&ci->ci_gdt[idx + 0], &d.bits[0]);
		update_descriptor(&ci->ci_gdt[idx + 1], &d.bits[1]);
	}
}
#endif	/* USER_LDT || !XENPV */

/*
 * Initialize the GDT. We already have a gdtstore, which was temporarily
 * used by the bootstrap code. Now we allocate a new gdtstore and put it
 * in cpu0.
 */
void
gdt_init(void)
{
	char *old_gdt;
	struct cpu_info *ci = &cpu_info_primary;

	/* Initialize the global values */
	memset(&gdt_bitmap.busy, 0, sizeof(gdt_bitmap.busy));
	gdt_bitmap.nslots = NSLOTS(gdt_size);

	old_gdt = gdtstore;

#ifdef __HAVE_PCPU_AREA
	/* The GDT is part of the pcpuarea */
	gdtstore = (char *)&pcpuarea->ent[cpu_index(ci)].gdt;
#else
	struct vm_page *pg;
	vaddr_t va;

	/* Allocate gdt_size bytes of memory. */
	gdtstore = (char *)uvm_km_alloc(kernel_map, gdt_size, 0,
	    UVM_KMF_VAONLY);
	for (va = (vaddr_t)gdtstore; va < (vaddr_t)gdtstore + gdt_size;
	    va += PAGE_SIZE) {
		pg = uvm_pagealloc(NULL, 0, NULL, UVM_PGA_ZERO);
		if (pg == NULL) {
			panic("gdt_init: no pages");
		}
		pmap_kenter_pa(va, VM_PAGE_TO_PHYS(pg),
		    VM_PROT_READ | VM_PROT_WRITE, 0);
	}
	pmap_update(pmap_kernel());
#endif

	/* Copy the initial bootstrap GDT into the new area. */
	memcpy(gdtstore, old_gdt, DYNSEL_START);
	ci->ci_gdt = (void *)gdtstore;
#ifndef XENPV
	set_sys_segment(GDT_ADDR_SYS(gdtstore, GLDT_SEL), ldtstore,
	    LDT_SIZE - 1, SDT_SYSLDT, SEL_KPL, 0);
#endif

	gdt_init_cpu(ci);
}

/*
 * Allocate a shadow GDT for a secondary CPU. It contains the same values
 * as the GDT present in cpu0 (gdtstore).
 */
void
gdt_alloc_cpu(struct cpu_info *ci)
{
#ifdef __HAVE_PCPU_AREA
	ci->ci_gdt = (union descriptor *)&pcpuarea->ent[cpu_index(ci)].gdt;
#else
	struct vm_page *pg;
	vaddr_t va;

	ci->ci_gdt = (union descriptor *)uvm_km_alloc(kernel_map, gdt_size,
	    0, UVM_KMF_VAONLY);
	for (va = (vaddr_t)ci->ci_gdt; va < (vaddr_t)ci->ci_gdt + gdt_size;
	    va += PAGE_SIZE) {
		while ((pg = uvm_pagealloc(NULL, 0, NULL, UVM_PGA_ZERO))
		    == NULL) {
			uvm_wait("gdt_alloc_cpu");
		}
		pmap_kenter_pa(va, VM_PAGE_TO_PHYS(pg),
		    VM_PROT_READ | VM_PROT_WRITE, 0);
	}
	pmap_update(pmap_kernel());
#endif

	memcpy(ci->ci_gdt, gdtstore, gdt_size);
}
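
/*
 * Typical application-processor bring-up sequence (a sketch; the actual
 * call sites live in the MD cpu attach/hatch code):
 *
 *	gdt_alloc_cpu(ci);	(on the boot CPU: build ci's shadow GDT)
 *	...
 *	gdt_init_cpu(ci);	(later, on ci itself: load it with lgdt)
 */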

/*
 * Load the appropriate GDT descriptor into the currently running CPU,
 * which must be ci.
 */
void
gdt_init_cpu(struct cpu_info *ci)
{
	struct region_descriptor region;

	KASSERT(curcpu() == ci);

	setregion(&region, ci->ci_gdt, (uint16_t)(gdt_size - 1));
	lgdt(&region);
}

#if !defined(XENPV) || defined(USER_LDT)
static int
gdt_get_slot(void)
{
	size_t i;

	KASSERT(mutex_owned(&cpu_lock));

	for (i = 0; i < gdt_bitmap.nslots; i++) {
		if (!gdt_bitmap.busy[i]) {
			gdt_bitmap.busy[i] = true;
			return (int)i;
		}
	}
	panic("gdt_get_slot: out of memory");

	/* NOTREACHED */
	return 0;
}

static void
gdt_put_slot(int slot)
{
	KASSERT(mutex_owned(&cpu_lock));
	KASSERT(slot < gdt_bitmap.nslots);
	gdt_bitmap.busy[slot] = false;
}
#endif
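
/*
 * Slot lifecycle sketch: callers serialize on cpu_lock, claim a slot,
 * install a descriptor in every CPU's GDT, and eventually release the
 * slot, as tss_alloc()/tss_free() below do:
 *
 *	mutex_enter(&cpu_lock);
 *	slot = gdt_get_slot();
 *	set_sys_gdt(slot, base, limit, type, dpl, gran);
 *	mutex_exit(&cpu_lock);
 */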

int
tss_alloc(struct x86_64_tss *tss)
{
#ifndef XENPV
	int slot;

	mutex_enter(&cpu_lock);

	slot = gdt_get_slot();
	set_sys_gdt(slot, tss, sizeof(struct x86_64_tss) - 1, SDT_SYS386TSS,
	    SEL_KPL, 0);

	mutex_exit(&cpu_lock);

	return GDYNSEL(slot, SEL_KPL);
#else
	/* TSS, what for? */
	return GSEL(GNULL_SEL, SEL_KPL);
#endif
}
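
/*
 * Example (a sketch; the exact field names are illustrative): a per-CPU
 * TSS is typically registered as
 *
 *	ci->ci_tss_sel = tss_alloc(&ci->ci_tss->tss);
 *
 * and the returned selector is later loaded into the task register
 * with ltr.
 */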

void
tss_free(int sel)
{
#ifndef XENPV
	mutex_enter(&cpu_lock);
	gdt_put_slot(IDXDYNSEL(sel));
	mutex_exit(&cpu_lock);
#else
	KASSERT(sel == GSEL(GNULL_SEL, SEL_KPL));
#endif
}

#ifdef USER_LDT
int
ldt_alloc(void *ldtp, size_t len)
{
	int slot;

	KASSERT(mutex_owned(&cpu_lock));

	slot = gdt_get_slot();
	set_sys_gdt(slot, ldtp, len - 1, SDT_SYSLDT, SEL_KPL, 0);

	return GDYNSEL(slot, SEL_KPL);
}

void
ldt_free(int sel)
{
	int slot;

	KASSERT(mutex_owned(&cpu_lock));

	slot = IDXDYNSEL(sel);

	gdt_put_slot(slot);
}
#endif
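
/*
 * Caller sketch (illustrative): the USER_LDT machinery allocates a GDT
 * descriptor for a per-process LDT while already holding cpu_lock
 * (hence the KASSERTs above), roughly:
 *
 *	mutex_enter(&cpu_lock);
 *	sel = ldt_alloc(new_ldt, len);
 *	...
 *	ldt_free(sel);
 *	mutex_exit(&cpu_lock);
 */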

#ifdef XENPV
void
lgdt(struct region_descriptor *desc)
{
	paddr_t frames[16];
	size_t i;
	vaddr_t va;

	/*
	 * Xen checks descriptors even AFTER the limit. Zero out the rest
	 * of the last frame beyond the limit if needed.
	 */
	va = desc->rd_base + desc->rd_limit + 1;
	memset((void *)va, 0, roundup(va, PAGE_SIZE) - va);

	/*
	 * The lgdt instruction uses virtual addresses, so do some
	 * translation for Xen. Also mark the pages R/O; otherwise Xen
	 * will refuse to use them.
	 */
	for (i = 0; i < roundup(desc->rd_limit, PAGE_SIZE) >> PAGE_SHIFT; i++) {
		va = desc->rd_base + (i << PAGE_SHIFT);
		frames[i] = ((paddr_t)xpmap_ptetomach((pt_entry_t *)va)) >>
		    PAGE_SHIFT;
		pmap_pte_clearbits(kvtopte(va), PTE_W);
	}

	if (HYPERVISOR_set_gdt(frames, (desc->rd_limit + 1) >> 3))
		panic("lgdt(): HYPERVISOR_set_gdt() failed");
	lgdt_finish();
}
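
/*
 * Note (a sketch-level observation): frames[] above describes at most
 * 16 page frames, i.e. 64KB of GDT; under XENPV gdt_size is
 * FIRST_RESERVED_GDT_BYTE (see above), which is expected to fit within
 * that bound.
 */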
#endif